Mirror of https://github.com/fafhrd91/actix-web (synced 2025-08-19 20:35:36 +02:00)

Compare commits: v1.0.0-alp ... 3.0 (881 commits)
[Commit table omitted: 881 rows containing only abbreviated SHA1 hashes; the author, date, and message columns were empty in this mirror.]
appveyor.yml (deleted, 41 lines):

@@ -1,41 +0,0 @@
environment:
  global:
    PROJECT_NAME: actix-web
  matrix:
    # Stable channel
    - TARGET: i686-pc-windows-msvc
      CHANNEL: stable
    - TARGET: x86_64-pc-windows-gnu
      CHANNEL: stable
    - TARGET: x86_64-pc-windows-msvc
      CHANNEL: stable
    # Nightly channel
    - TARGET: i686-pc-windows-msvc
      CHANNEL: nightly
    - TARGET: x86_64-pc-windows-gnu
      CHANNEL: nightly
    - TARGET: x86_64-pc-windows-msvc
      CHANNEL: nightly

# Install Rust and Cargo
# (Based on from https://github.com/rust-lang/libc/blob/master/appveyor.yml)
install:
  - ps: >-
      If ($Env:TARGET -eq 'x86_64-pc-windows-gnu') {
        $Env:PATH += ';C:\msys64\mingw64\bin'
      } ElseIf ($Env:TARGET -eq 'i686-pc-windows-gnu') {
        $Env:PATH += ';C:\MinGW\bin'
      }
  - curl -sSf -o rustup-init.exe https://win.rustup.rs
  - rustup-init.exe --default-host %TARGET% --default-toolchain %CHANNEL% -y
  - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
  - rustc -Vv
  - cargo -V

# 'cargo test' takes care of building for us, so disable Appveyor's build stage.
build: false

# Equivalent to Travis' `script` phase
test_script:
  - cargo clean
  - cargo test --no-default-features --features="flate2-rust"
.github/ISSUE_TEMPLATE/bug_report.md (new file, 37 lines, vendored):

@@ -0,0 +1,37 @@
---
name: Bug Report
about: Create a bug report.
---

Your issue may already be reported!
Please search on the [Actix Web issue tracker](https://github.com/actix/actix-web/issues) before creating one.

## Expected Behavior
<!--- If you're describing a bug, tell us what should happen -->
<!--- If you're suggesting a change/improvement, tell us how it should work -->

## Current Behavior
<!--- If describing a bug, tell us what happens instead of the expected behavior -->
<!--- If suggesting a change/improvement, explain the difference from current behavior -->

## Possible Solution
<!--- Not obligatory, but suggest a fix/reason for the bug, -->
<!--- or ideas how to implement the addition or change -->

## Steps to Reproduce (for bugs)
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
<!--- reproduce this bug. Include code to reproduce, if relevant -->
1.
2.
3.
4.

## Context
<!--- How has this issue affected you? What are you trying to accomplish? -->
<!--- Providing context helps us come up with a solution that is most useful in the real world -->

## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->

* Rust Version (I.e, output of `rustc -V`):
* Actix Web Version:
.github/ISSUE_TEMPLATE/config.yml (new file, 15 lines, vendored):

@@ -0,0 +1,15 @@
blank_issues_enabled: true
contact_links:
  - name: GitHub Discussions
    url: https://github.com/actix/actix-web/discussions
    about: Actix Web Q&A
  - name: Gitter chat (actix-web)
    url: https://gitter.im/actix/actix-web
    about: Actix Web Q&A
  - name: Gitter chat (actix)
    url: https://gitter.im/actix/actix
    about: Actix (actor framework) Q&A
  - name: Actix Discord
    url: https://discord.gg/NWpN5mmg3x
    about: Actix developer discussion and community chat
.github/PULL_REQUEST_TEMPLATE.md (new file, 27 lines, vendored):

@@ -0,0 +1,27 @@
<!-- Thanks for considering contributing actix! -->
<!-- Please fill out the following to make our reviews easy. -->

## PR Type
<!-- What kind of change does this PR make? -->
<!-- Bug Fix / Feature / Refactor / Code Style / Other -->
INSERT_PR_TYPE


## PR Checklist
Check your PR fulfills the following:

<!-- For draft PRs check the boxes as you complete them. -->

- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] A changelog entry has been made for the appropriate packages.
- [ ] Format code with the latest stable rustfmt


## Overview
<!-- Describe the current and new behavior. -->
<!-- Emphasize any breaking changes. -->


<!-- If this PR fixes or closes an issue, reference it here. -->
<!-- Closes #000 -->
.github/workflows/bench.yml (new file, 28 lines, vendored):

@@ -0,0 +1,28 @@
name: Benchmark (Linux)

on:
  pull_request:
    types: [opened, synchronize, reopened]
  push:
    branches:
      - master

jobs:
  check_benchmark:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v2

      - name: Install Rust
        uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly
          profile: minimal
          override: true

      - name: Check benchmark
        uses: actions-rs/cargo@v1
        with:
          command: bench
          args: --bench=server -- --sample-size=15
.github/workflows/clippy-fmt.yml (new file, 32 lines, vendored):

@@ -0,0 +1,32 @@
on:
  pull_request:
    types: [opened, synchronize, reopened]

name: Clippy and rustfmt Check
jobs:
  clippy_check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          components: rustfmt
          override: true
      - name: Check with rustfmt
        uses: actions-rs/cargo@v1
        with:
          command: fmt
          args: --all -- --check

      - uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly
          components: clippy
          override: true
      - name: Check with Clippy
        uses: actions-rs/clippy-check@v1
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          args: --all-features --all --tests
.github/workflows/linux.yml (new file, 69 lines, vendored):

@@ -0,0 +1,69 @@
name: CI (Linux)

on:
  pull_request:
    types: [opened, synchronize, reopened]
  push:
    branches:
      - master

jobs:
  build_and_test:
    strategy:
      fail-fast: false
      matrix:
        version:
          - 1.42.0 # MSRV
          - stable
          - nightly

    name: ${{ matrix.version }} - x86_64-unknown-linux-gnu
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v2

      - name: Install ${{ matrix.version }}
        uses: actions-rs/toolchain@v1
        with:
          toolchain: ${{ matrix.version }}-x86_64-unknown-linux-gnu
          profile: minimal
          override: true

      - name: check build
        uses: actions-rs/cargo@v1
        with:
          command: check
          args: --all --bins --examples --tests

      - name: tests
        uses: actions-rs/cargo@v1
        timeout-minutes: 40
        with:
          command: test
          args: --all --all-features --no-fail-fast -- --nocapture

      - name: tests (actix-http)
        uses: actions-rs/cargo@v1
        timeout-minutes: 40
        with:
          command: test
          args: --package=actix-http --no-default-features --features=rustls -- --nocapture

      - name: tests (awc)
        uses: actions-rs/cargo@v1
        timeout-minutes: 40
        with:
          command: test
          args: --package=awc --no-default-features --features=rustls -- --nocapture

      - name: Generate coverage file
        if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
        run: |
          cargo install cargo-tarpaulin --vers "^0.13"
          cargo tarpaulin --out Xml
      - name: Upload to Codecov
        if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
        uses: codecov/codecov-action@v1
        with:
          file: cobertura.xml
.github/workflows/macos.yml (new file, 44 lines, vendored):

@@ -0,0 +1,44 @@
name: CI (macOS)

on:
  pull_request:
    types: [opened, synchronize, reopened]
  push:
    branches:
      - master

jobs:
  build_and_test:
    strategy:
      fail-fast: false
      matrix:
        version:
          - stable
          - nightly

    name: ${{ matrix.version }} - x86_64-apple-darwin
    runs-on: macOS-latest

    steps:
      - uses: actions/checkout@v2

      - name: Install ${{ matrix.version }}
        uses: actions-rs/toolchain@v1
        with:
          toolchain: ${{ matrix.version }}-x86_64-apple-darwin
          profile: minimal
          override: true

      - name: check build
        uses: actions-rs/cargo@v1
        with:
          command: check
          args: --all --bins --examples --tests

      - name: tests
        uses: actions-rs/cargo@v1
        with:
          command: test
          args: --all --all-features --no-fail-fast -- --nocapture
            --skip=test_h2_content_length
            --skip=test_reading_deflate_encoding_large_random_rustls
.github/workflows/upload-doc.yml (new file, 37 lines, vendored):

@@ -0,0 +1,37 @@
name: Upload documentation

on:
  push:
    branches:
      - master

jobs:
  build:
    runs-on: ubuntu-latest
    if: github.repository == 'actix/actix-web'

    steps:
      - uses: actions/checkout@v2

      - name: Install Rust
        uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly-x86_64-unknown-linux-gnu
          profile: minimal
          override: true

      - name: check build
        uses: actions-rs/cargo@v1
        with:
          command: doc
          args: --no-deps --workspace --all-features

      - name: Tweak HTML
        run: echo "<meta http-equiv=refresh content=0;url=os_balloon/index.html>" > target/doc/index.html

      - name: Deploy to GitHub Pages
        uses: JamesIves/github-pages-deploy-action@3.7.1
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          BRANCH: gh-pages
          FOLDER: target/doc
.github/workflows/windows.yml (new file, 64 lines, vendored):

@@ -0,0 +1,64 @@
name: CI (Windows)

on:
  pull_request:
    types: [opened, synchronize, reopened]
  push:
    branches:
      - master

env:
  VCPKGRS_DYNAMIC: 1

jobs:
  build_and_test:
    strategy:
      fail-fast: false
      matrix:
        version:
          - stable
          - nightly

    name: ${{ matrix.version }} - x86_64-pc-windows-msvc
    runs-on: windows-latest

    steps:
      - uses: actions/checkout@v2

      - name: Install ${{ matrix.version }}
        uses: actions-rs/toolchain@v1
        with:
          toolchain: ${{ matrix.version }}-x86_64-pc-windows-msvc
          profile: minimal
          override: true

      - name: Install OpenSSL
        run: |
          vcpkg integrate install
          vcpkg install openssl:x64-windows
          Copy-Item C:\vcpkg\installed\x64-windows\bin\libcrypto-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libcrypto.dll
          Copy-Item C:\vcpkg\installed\x64-windows\bin\libssl-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libssl.dll
          Get-ChildItem C:\vcpkg\installed\x64-windows\bin
          Get-ChildItem C:\vcpkg\installed\x64-windows\lib

      - name: check build
        uses: actions-rs/cargo@v1
        with:
          command: check
          args: --all --bins --examples --tests

      - name: tests
        uses: actions-rs/cargo@v1
        with:
          command: test
          args: --all --all-features --no-fail-fast -- --nocapture
            --skip=test_h2_content_length
            --skip=test_reading_deflate_encoding_large_random_rustls
            --skip=test_params
            --skip=test_simple
            --skip=test_expect_continue
            --skip=test_http10_keepalive
            --skip=test_slow_request
            --skip=test_connection_force_close
            --skip=test_connection_server_close
            --skip=test_connection_wait_queue_force_close
.gitignore (4 lines changed, vendored):

@@ -9,6 +9,10 @@ guide/build/
*.pid
*.sock
*~
.DS_Store

# These are backup files generated by rustfmt
**/*.rs.bk

# Configuration directory generated by CLion
.idea
.travis.yml (deleted, 56 lines):

@@ -1,56 +0,0 @@
language: rust
sudo: required
dist: trusty

cache:
  cargo: true
  apt: true

matrix:
  include:
    - rust: stable
    - rust: beta
    - rust: nightly-2019-04-02
  allow_failures:
    - rust: nightly-2019-04-02

env:
  global:
    # - RUSTFLAGS="-C link-dead-code"
    - OPENSSL_VERSION=openssl-1.0.2

before_install:
  - sudo add-apt-repository -y ppa:0k53d-karl-f830m/openssl
  - sudo apt-get update -qq
  - sudo apt-get install -y openssl libssl-dev libelf-dev libdw-dev cmake gcc binutils-dev libiberty-dev

before_cache: |
  if [[ "$TRAVIS_RUST_VERSION" == "nightly-2019-04-02" ]]; then
    RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install cargo-tarpaulin
  fi

# Add clippy
before_script:
  - export PATH=$PATH:~/.cargo/bin

script:
  - cargo update
  - cargo check --all --no-default-features
  - cargo test --all-features --all -- --nocapture

# Upload docs
after_success:
  - |
    if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_PULL_REQUEST" = "false" && "$TRAVIS_BRANCH" == "master" && "$TRAVIS_RUST_VERSION" == "stable" ]]; then
      cargo doc --all-features &&
      echo "<meta http-equiv=refresh content=0;url=os_balloon/index.html>" > target/doc/index.html &&
      git clone https://github.com/davisp/ghp-import.git &&
      ./ghp-import/ghp_import.py -n -p -f -m "Documentation upload" -r https://"$GH_TOKEN"@github.com/"$TRAVIS_REPO_SLUG.git" target/doc &&
      echo "Uploaded documentation"
    fi
  - |
    if [[ "$TRAVIS_RUST_VERSION" == "nightly-2019-04-02" ]]; then
      taskset -c 0 cargo tarpaulin --out Xml --all --all-features
      bash <(curl -s https://codecov.io/bash)
      echo "Uploaded code coverage"
    fi
CHANGES.md (552 lines changed):

@@ -1,5 +1,553 @@
# Changes

## Unreleased - 2020-xx-xx


## 3.3.3 - 2021-12-18
### Changed
* Soft-deprecate `NormalizePath::default()`, noting upcoming behavior change in v4. [#2529]

[#2529]: https://github.com/actix/actix-web/pull/2529


## 3.3.2 - 2020-12-01
### Fixed
* Removed an occasional `unwrap` on `None` panic in `NormalizePathNormalization`. [#1762]
* Fix `match_pattern()` returning `None` for scope with empty path resource. [#1798]
* Increase minimum `socket2` version. [#1803]

[#1762]: https://github.com/actix/actix-web/pull/1762
[#1798]: https://github.com/actix/actix-web/pull/1798
[#1803]: https://github.com/actix/actix-web/pull/1803


## 3.3.1 - 2020-11-29
* Ensure `actix-http` dependency uses same `serde_urlencoded`.


## 3.3.0 - 2020-11-25
### Added
* Add `Either<A, B>` extractor helper. [#1788]

### Changed
* Upgrade `serde_urlencoded` to `0.7`. [#1773]

[#1773]: https://github.com/actix/actix-web/pull/1773
[#1788]: https://github.com/actix/actix-web/pull/1788
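To illustrate the `Either<A, B>` extractor added in 3.3.0, a minimal sketch (not part of the diff above; it assumes the 3.x API in which `Either`'s variants are named `A` and `B`, and uses a hypothetical `Info` payload type):

```rust
use actix_web::{web, Either, HttpResponse};
use serde::Deserialize;

#[derive(Deserialize)]
struct Info {
    name: String,
}

// Accept the same payload as either JSON or a URL-encoded form.
async fn index(body: Either<web::Json<Info>, web::Form<Info>>) -> HttpResponse {
    let name = match body {
        Either::A(json) => json.into_inner().name,
        Either::B(form) => form.into_inner().name,
    };
    HttpResponse::Ok().body(format!("Hello {}!", name))
}
```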
## 3.2.0 - 2020-10-30
### Added
* Implement `exclude_regex` for Logger middleware. [#1723]
* Add request-local data extractor `web::ReqData`. [#1748]
* Add ability to register closure for request middleware logging. [#1749]
* Add `app_data` to `ServiceConfig`. [#1757]
* Expose `on_connect` for access to the connection stream before request is handled. [#1754]

### Changed
* Updated actix-web-codegen dependency for access to new `#[route(...)]` multi-method macro.
* Print non-configured `Data<T>` type when attempting extraction. [#1743]
* Re-export bytes::Buf{Mut} in web module. [#1750]
* Upgrade `pin-project` to `1.0`.

[#1723]: https://github.com/actix/actix-web/pull/1723
[#1743]: https://github.com/actix/actix-web/pull/1743
[#1748]: https://github.com/actix/actix-web/pull/1748
[#1750]: https://github.com/actix/actix-web/pull/1750
[#1754]: https://github.com/actix/actix-web/pull/1754
[#1749]: https://github.com/actix/actix-web/pull/1749


## 3.1.0 - 2020-09-29
### Changed
* Add `TrailingSlash::MergeOnly` behaviour to `NormalizePath`, which allows `NormalizePath`
  to retain any trailing slashes. [#1695]
* Remove bound `std::marker::Sized` from `web::Data` to support storing `Arc<dyn Trait>`
  via `web::Data::from` [#1710]

### Fixed
* `ResourceMap` debug printing is no longer infinitely recursive. [#1708]

[#1695]: https://github.com/actix/actix-web/pull/1695
[#1708]: https://github.com/actix/actix-web/pull/1708
[#1710]: https://github.com/actix/actix-web/pull/1710
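The `web::Data::from` entries above (building a `Data<T>` from an `Arc<T>`, and the relaxed `Sized` bound that allows `Arc<dyn Trait>`) can be sketched as follows. This is illustrative only: the `Greeter` trait and handler are invented names, and it assumes the 3.1+ API in which `Data<dyn Trait>` can be both constructed via `Data::from` and extracted in handlers.

```rust
use std::sync::Arc;
use actix_web::{web, App, HttpResponse, HttpServer};

trait Greeter: Send + Sync {
    fn greet(&self) -> String;
}

struct EnglishGreeter;

impl Greeter for EnglishGreeter {
    fn greet(&self) -> String {
        "hello".to_owned()
    }
}

// Handler extracts the trait object as shared app data.
async fn index(greeter: web::Data<dyn Greeter>) -> HttpResponse {
    HttpResponse::Ok().body(greeter.greet())
}

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    // Unsized target: Arc<dyn Greeter> becomes Data<dyn Greeter> via Data::from.
    let greeter: Arc<dyn Greeter> = Arc::new(EnglishGreeter);
    let data: web::Data<dyn Greeter> = web::Data::from(greeter);

    HttpServer::new(move || {
        App::new()
            .app_data(data.clone())
            .route("/", web::get().to(index))
    })
    .bind("127.0.0.1:8080")?
    .run()
    .await
}
```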
## 3.0.2 - 2020-09-15
### Fixed
* `NormalizePath` when used with `TrailingSlash::Trim` no longer trims the root path "/". [#1678]

[#1678]: https://github.com/actix/actix-web/pull/1678


## 3.0.1 - 2020-09-13
### Changed
* `middleware::normalize::TrailingSlash` enum is now accessible. [#1673]

[#1673]: https://github.com/actix/actix-web/pull/1673


## 3.0.0 - 2020-09-11
* No significant changes from `3.0.0-beta.4`.


## 3.0.0-beta.4 - 2020-09-09
### Added
* `middleware::NormalizePath` now has configurable behaviour for either always having a trailing
  slash, or as the new addition, always trimming trailing slashes. [#1639]

### Changed
* Update actix-codec and actix-utils dependencies. [#1634]
* `FormConfig` and `JsonConfig` configurations are now also considered when set
  using `App::data`. [#1641]
* `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`. [#1655]
* `HttpServer::maxconnrate` is renamed to the more expressive
  `HttpServer::max_connection_rate`. [#1655]

[#1639]: https://github.com/actix/actix-web/pull/1639
[#1641]: https://github.com/actix/actix-web/pull/1641
[#1634]: https://github.com/actix/actix-web/pull/1634
[#1655]: https://github.com/actix/actix-web/pull/1655
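The configurable `NormalizePath` behaviour described in the 3.0.0-beta.4 entry can be wired up roughly as below. This is a sketch, not from the diff; it assumes the 3.x API in which the enum lives at `middleware::normalize::TrailingSlash` (made public in 3.0.1 above), and the route and handler are illustrative.

```rust
use actix_web::middleware::{normalize::TrailingSlash, NormalizePath};
use actix_web::{web, App, HttpResponse, HttpServer};

async fn users() -> HttpResponse {
    HttpResponse::Ok().body("user list")
}

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new()
            // Trim trailing slashes so "/users/" is routed like "/users".
            .wrap(NormalizePath::new(TrailingSlash::Trim))
            .route("/users", web::get().to(users))
    })
    .bind("127.0.0.1:8080")?
    .run()
    .await
}
```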
## 3.0.0-beta.3 - 2020-08-17
### Changed
* Update `rustls` to 0.18


## 3.0.0-beta.2 - 2020-08-17
### Changed
* `PayloadConfig` is now also considered in `Bytes` and `String` extractors when set
  using `App::data`. [#1610]
* `web::Path` now has a public representation: `web::Path(pub T)` that enables
  destructuring. [#1594]
* `ServiceRequest::app_data` allows retrieval of non-Data data without splitting into parts to
  access `HttpRequest` which already allows this. [#1618]
* Re-export all error types from `awc`. [#1621]
* MSRV is now 1.42.0.

### Fixed
* Memory leak of app data in pooled requests. [#1609]

[#1594]: https://github.com/actix/actix-web/pull/1594
[#1609]: https://github.com/actix/actix-web/pull/1609
[#1610]: https://github.com/actix/actix-web/pull/1610
[#1618]: https://github.com/actix/actix-web/pull/1618
[#1621]: https://github.com/actix/actix-web/pull/1621
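The `web::Path(pub T)` change above lets the extractor be destructured directly in a handler signature; a minimal sketch (handler and route path are illustrative, not from the diff):

```rust
use actix_web::{web, HttpResponse};

// With the tuple field public, the pattern `web::Path((...))` works in the signature.
async fn user_post(web::Path((user_id, post_id)): web::Path<(u32, String)>) -> HttpResponse {
    HttpResponse::Ok().body(format!("user {} / post {}", user_id, post_id))
}

// Registered as, e.g.:
// App::new().route("/users/{user_id}/posts/{post_id}", web::get().to(user_post))
```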
## 3.0.0-beta.1 - 2020-07-13
### Added
* Re-export `actix_rt::main` as `actix_web::main`.
* `HttpRequest::match_pattern` and `ServiceRequest::match_pattern` for extracting the matched
  resource pattern.
* `HttpRequest::match_name` and `ServiceRequest::match_name` for extracting matched resource name.

### Changed
* Fix actix_http::h1::dispatcher so it returns when HW_BUFFER_SIZE is reached. Should reduce peak memory consumption during large uploads. [#1550]
* Migrate cookie handling to `cookie` crate. Actix-web no longer requires `ring` dependency.
* MSRV is now 1.41.1

### Fixed
* `NormalizePath` improved consistency when path needs slashes added _and_ removed.
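The re-exported `actix_web::main` macro mentioned in the 3.0.0-beta.1 entry lets a binary annotate its async entry point directly; a minimal sketch (the handler is illustrative, not from the diff):

```rust
use actix_web::{get, App, HttpServer, Responder};

#[get("/")]
async fn hello() -> impl Responder {
    "Hello from actix-web!"
}

#[actix_web::main] // wraps the async main in an actix-rt runtime
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| App::new().service(hello))
        .bind("127.0.0.1:8080")?
        .run()
        .await
}
```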
## 3.0.0-alpha.3 - 2020-05-21
### Added
* Add option to create `Data<T>` from `Arc<T>` [#1509]

### Changed
* Resources and Scopes can now access non-overridden data types set on App (or containing scopes) when setting their own data. [#1486]
* Fix audit issue logging by default peer address [#1485]
* Bump minimum supported Rust version to 1.40
* Replace deprecated `net2` crate with `socket2`

[#1485]: https://github.com/actix/actix-web/pull/1485
[#1509]: https://github.com/actix/actix-web/pull/1509


## [3.0.0-alpha.2] - 2020-05-08

### Changed

* `{Resource,Scope}::default_service(f)` handlers now support app data extraction. [#1452]
* Implement `std::error::Error` for our custom errors [#1422]
* NormalizePath middleware now appends trailing / so that routes of form /example/ respond to /example requests. [#1433]
* Remove the `failure` feature and support.

[#1422]: https://github.com/actix/actix-web/pull/1422
[#1433]: https://github.com/actix/actix-web/pull/1433
[#1452]: https://github.com/actix/actix-web/pull/1452
[#1486]: https://github.com/actix/actix-web/pull/1486


## [3.0.0-alpha.1] - 2020-03-11

### Added

* Add helper function for creating routes with `TRACE` method guard `web::trace()`
* Add convenience functions `test::read_body_json()` and `test::TestRequest::send_request()` for testing.

### Changed

* Use `sha-1` crate instead of unmaintained `sha1` crate
* Skip empty chunks when returning response from a `Stream` [#1308]
* Update the `time` dependency to 0.2.7
* Update `actix-tls` dependency to 2.0.0-alpha.1
* Update `rustls` dependency to 0.17

[#1308]: https://github.com/actix/actix-web/pull/1308


## [2.0.0] - 2019-12-25

### Changed

* Rename `HttpServer::start()` to `HttpServer::run()`

* Allow to gracefully stop test server via `TestServer::stop()`

* Allow to specify multi-patterns for resources


## [2.0.0-rc] - 2019-12-20

### Changed

* Move `BodyEncoding` to `dev` module #1220

* Allow to set `peer_addr` for TestRequest #1074

* Make web::Data deref to Arc<T> #1214

* Rename `App::register_data()` to `App::app_data()`

* `HttpRequest::app_data<T>()` returns `Option<&T>` instead of `Option<&Data<T>>`

### Fixed

* Fix `AppConfig::secure()` is always false. #1202


## [2.0.0-alpha.6] - 2019-12-15

### Fixed

* Fixed compilation with default features off


## [2.0.0-alpha.5] - 2019-12-13

### Added

* Add test server, `test::start()` and `test::start_with()`


## [2.0.0-alpha.4] - 2019-12-08

### Deleted

* Delete HttpServer::run(), it is not useful with async/await


## [2.0.0-alpha.3] - 2019-12-07

### Changed

* Migrate to tokio 0.2


## [2.0.0-alpha.1] - 2019-11-22

### Changed

* Migrated to `std::future`

* Remove implementation of `Responder` for `()`. (#1167)


## [1.0.9] - 2019-11-14

### Added

* Add `Payload::into_inner` method and make stored `def::Payload` public. (#1110)

### Changed

* Support `Host` guards when the `Host` header is unset (e.g. HTTP/2 requests) (#1129)


## [1.0.8] - 2019-09-25

### Added

* Add `Scope::register_data` and `Resource::register_data` methods, parallel to
  `App::register_data`.

* Add `middleware::Condition` that conditionally enables another middleware

* Allow to re-construct `ServiceRequest` from `HttpRequest` and `Payload`

* Add `HttpServer::listen_uds` for ability to listen on UDS FD rather than path,
  which is useful for example with systemd.

### Changed

* Make UrlEncodedError::Overflow more informative

* Use actix-testing for testing utils


## [1.0.7] - 2019-08-29

### Fixed

* Request Extensions leak #1062


## [1.0.6] - 2019-08-28

### Added

* Re-implement Host predicate (#989)

* Form implements Responder, returning a `application/x-www-form-urlencoded` response

* Add `into_inner` to `Data`

* Add `test::TestRequest::set_form()` convenience method to automatically serialize data and set
  the header in test requests.

### Changed

* `Query` payload made `pub`. Allows user to pattern-match the payload.

* Enable `rust-tls` feature for client #1045

* Update serde_urlencoded to 0.6.1

* Update url to 2.1
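To illustrate `Form` acting as a `Responder` (the 1.0.6 entry above), a small sketch; the `Token` struct and handler are invented names, and it assumes the tuple field of `Form` is constructible directly, as the "`Query` payload made `pub`" era of the API suggests:

```rust
use actix_web::{web, Responder};
use serde::Serialize;

#[derive(Serialize)]
struct Token {
    access_token: String,
}

// Returning web::Form<T> serializes `T` and responds with
// Content-Type: application/x-www-form-urlencoded.
// In the 1.0-era sync handler style this would be registered with `web::get().to(token)`.
fn token() -> impl Responder {
    web::Form(Token {
        access_token: "abc123".to_owned(),
    })
}
```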
## [1.0.5] - 2019-07-18

### Added

* Unix domain sockets (HttpServer::bind_uds) #92

* Actix now logs errors resulting in "internal server error" responses always, with the `error`
  logging level

### Fixed

* Restored logging of errors through the `Logger` middleware


## [1.0.4] - 2019-07-17

### Added

* Add `Responder` impl for `(T, StatusCode) where T: Responder`

* Allow to access app's resource map via
  `ServiceRequest::resource_map()` and `HttpRequest::resource_map()` methods.

### Changed

* Upgrade `rand` dependency version to 0.7
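The `(T, StatusCode)` responder impl from 1.0.4 lets a handler attach a status code to any responder by returning a tuple; a minimal sketch with an invented payload type, not taken from the diff:

```rust
use actix_web::{http::StatusCode, web, Responder};
use serde::Serialize;

#[derive(Serialize)]
struct Created {
    id: u64,
}

// The tuple impl overrides the default 200 OK with 201 Created.
// In the 1.0-era API this would be registered with `web::post().to(create)`.
fn create() -> impl Responder {
    (web::Json(Created { id: 42 }), StatusCode::CREATED)
}
```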
## [1.0.3] - 2019-06-28

### Added

* Support asynchronous data factories #850

### Changed

* Use `encoding_rs` crate instead of unmaintained `encoding` crate


## [1.0.2] - 2019-06-17

### Changed

* Move cors middleware to `actix-cors` crate.

* Move identity middleware to `actix-identity` crate.


## [1.0.1] - 2019-06-17

### Added

* Add support for PathConfig #903

* Add `middleware::identity::RequestIdentity` trait to `get_identity` from `HttpMessage`.

### Changed

* Move cors middleware to `actix-cors` crate.

* Move identity middleware to `actix-identity` crate.

* Disable default feature `secure-cookies`.

* Allow to test an app that uses async actors #897

* Re-apply patch from #637 #894

### Fixed

* HttpRequest::url_for is broken with nested scopes #915


## [1.0.0] - 2019-06-05

### Added

* Add `Scope::configure()` method.

* Add `ServiceRequest::set_payload()` method.

* Add `test::TestRequest::set_json()` convenience method to automatically
  serialize data and set header in test requests.

* Add macros for head, options, trace, connect and patch http methods

### Changed

* Drop an unnecessary `Option<_>` indirection around `ServerBuilder` from `HttpServer`. #863

### Fixed

* Fix Logger request time format, and use rfc3339. #867

* Clear http requests pool on app service drop #860


## [1.0.0-rc] - 2019-05-18

### Add

* Add `Query<T>::from_query()` to extract parameters from a query string. #846
* `QueryConfig`, similar to `JsonConfig` for customizing error handling of query extractors.

### Changed

* `JsonConfig` is now `Send + Sync`, this implies that `error_handler` must be `Send + Sync` too.

### Fixed

* Codegen with parameters in the path only resolves the first registered endpoint #841
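`Query<T>::from_query()` from the 1.0.0-rc entry above parses a raw query string outside of the extractor machinery; a small sketch (the `Params` struct is illustrative, not from the diff):

```rust
use actix_web::web::Query;
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct Params {
    name: String,
    count: u32,
}

fn main() {
    // Parse a raw query string directly, without going through a request.
    let params = Query::<Params>::from_query("name=actix&count=3")
        .expect("query string should deserialize into Params");
    assert_eq!(params.name, "actix");
    assert_eq!(params.count, 3);
}
```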
## [1.0.0-beta.4] - 2019-05-12

### Add

* Allow to set/override app data on scope level

### Changed

* `App::configure` take an `FnOnce` instead of `Fn`
* Upgrade actix-net crates


## [1.0.0-beta.3] - 2019-05-04

### Added

* Add helper function for executing futures `test::block_fn()`

### Changed

* Extractor configuration could be registered with `App::data()`
  or with `Resource::data()` #775

* Route data is unified with app data, `Route::data()` moved to resource
  level to `Resource::data()`

* CORS handling without headers #702

* Allow to construct `Data` instances to avoid double `Arc` for `Send + Sync` types.

### Fixed

* Fix `NormalizePath` middleware impl #806

### Deleted

* `App::data_factory()` is deleted.


## [1.0.0-beta.2] - 2019-04-24

### Added

* Add raw services support via `web::service()`

* Add helper functions for reading response body `test::read_body()`

* Add support for `remainder match` (i.e "/path/{tail}*")

* Extend `Responder` trait, allow to override status code and headers.

* Store visit and login timestamp in the identity cookie #502

### Changed

* `.to_async()` handler can return `Responder` type #792

### Fixed

* Fix async web::Data factory handling


## [1.0.0-beta.1] - 2019-04-20

### Added

* Add helper functions for reading test response body,
  `test::read_response()` and test::read_response_json()`

* Add `.peer_addr()` #744

* Add `NormalizePath` middleware

### Changed

* Rename `RouterConfig` to `ServiceConfig`

* Rename `test::call_success` to `test::call_service`

* Removed `ServiceRequest::from_parts()` as it is unsafe to create from parts.

* `CookieIdentityPolicy::max_age()` accepts value in seconds

### Fixed

* Fixed `TestRequest::app_data()`


## [1.0.0-alpha.6] - 2019-04-14

### Changed

* Allow to use any service as default service.

* Remove generic type for request payload, always use default.

* Removed `Decompress` middleware. Bytes, String, Json, Form extractors
  automatically decompress payload.

* Make extractor config type explicit. Add `FromRequest::Config` associated type.


## [1.0.0-alpha.5] - 2019-04-12

### Added

* Added async io `TestBuffer` for testing.

### Deleted

* Removed native-tls support


## [1.0.0-alpha.4] - 2019-04-08

### Added

@@ -18,6 +566,10 @@

* Move multipart support to actix-multipart crate

### Fixed

* Fix body propagation in Response::from_error. #760


## [1.0.0-alpha.3] - 2019-04-02
CODE_OF_CONDUCT.md:

@@ -34,10 +34,13 @@ This Code of Conduct applies both within project spaces and in public spaces whe

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at fafhrd91@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at robjtede@icloud.com ([@robjtede]) or huyuumi@neet.club ([@JohnTitor]). The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.

[@robjtede]: https://github.com/robjtede
[@JohnTitor]: https://github.com/JohnTitor

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
Cargo.toml (145 lines changed):

@@ -1,8 +1,8 @@
[package]
name = "actix-web"
version = "1.0.0-alpha.4"
version = "3.3.3"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust."
description = "Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust"
readme = "README.md"
keywords = ["actix", "http", "web", "framework", "async"]
homepage = "https://actix.rs"

@@ -11,10 +11,12 @@ documentation = "https://docs.rs/actix-web/"
categories = ["network-programming", "asynchronous",
              "web-programming::http-server",
              "web-programming::websocket"]
license = "MIT/Apache-2.0"
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
license = "MIT OR Apache-2.0"
edition = "2018"

[package.metadata.docs.rs]
features = ["openssl", "rustls", "compress", "secure-cookies"]

[badges]
travis-ci = { repository = "actix/actix-web", branch = "master" }
codecov = { repository = "actix/actix-web", branch = "master", service = "github" }

@@ -29,86 +31,93 @@ members = [
  "awc",
  "actix-http",
  "actix-files",
  "actix-session",
  "actix-multipart",
  "actix-web-actors",
  "actix-web-codegen",
  "test-server",
]

[package.metadata.docs.rs]
features = ["ssl", "tls", "brotli", "flate2-zlib", "secure-cookies", "client", "rust-tls"]

[features]
default = ["brotli", "flate2-zlib", "secure-cookies", "client"]
default = ["compress"]

# http client
client = ["awc"]
# content-encoding support
compress = ["actix-http/compress", "awc/compress"]

# brotli encoding, requires c compiler
brotli = ["actix-http/brotli"]

# miniz-sys backend for flate2 crate
flate2-zlib = ["actix-http/flate2-zlib"]

# rust backend for flate2 crate
flate2-rust = ["actix-http/flate2-rust"]

# sessions feature, session require "ring" crate and c compiler
# sessions feature
secure-cookies = ["actix-http/secure-cookies"]

# tls
tls = ["native-tls", "actix-server/ssl"]

# openssl
ssl = ["openssl", "actix-server/ssl", "awc/ssl"]
openssl = ["actix-tls/openssl", "awc/openssl", "open-ssl"]

# rustls
rust-tls = ["rustls", "actix-server/rust-tls"]
rustls = ["actix-tls/rustls", "awc/rustls", "rust-tls"]

[[example]]
name = "basic"
required-features = ["compress"]

[[example]]
name = "uds"
required-features = ["compress"]

[[test]]
name = "test_server"
required-features = ["compress"]

[[example]]
name = "on_connect"
required-features = []

[[example]]
name = "client"
required-features = ["rustls"]

[dependencies]
actix-codec = "0.1.1"
actix-service = "0.3.6"
actix-utils = "0.3.4"
actix-router = "0.1.2"
actix-rt = "0.2.2"
actix-web-codegen = "0.1.0-alpha.1"
actix-http = { version = "0.1.0-alpha.4", features=["fail"] }
actix-server = "0.4.2"
actix-server-config = "0.1.0"
actix-threadpool = "0.1.0"
awc = { version = "0.1.0-alpha.4", optional = true }
actix-codec = "0.3.0"
actix-service = "1.0.6"
actix-utils = "2.0.0"
actix-router = "0.2.4"
actix-rt = "1.1.1"
actix-server = "1.0.0"
actix-testing = "1.0.0"
actix-macros = "0.1.0"
actix-threadpool = "0.3.1"
actix-tls = "2.0.0"

bytes = "0.4"
derive_more = "0.14"
encoding = "0.2"
futures = "0.1"
hashbrown = "0.1.8"
actix-web-codegen = "0.4.0"
actix-http = "2.2.0"
awc = { version = "2.0.3", default-features = false }

bytes = "0.5.3"
derive_more = "0.99.5"
encoding_rs = "0.8"
futures-channel = { version = "0.3.5", default-features = false }
futures-core = { version = "0.3.5", default-features = false }
futures-util = { version = "0.3.5", default-features = false }
fxhash = "0.2.1"
log = "0.4"
mime = "0.3"
net2 = "0.2.33"
parking_lot = "0.7"
regex = "1.0"
serde = { version = "1.0", features=["derive"] }
socket2 = "0.3.16"
pin-project = "1.0.0"
regex = "1.4"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_urlencoded = "^0.5.3"
time = "0.1"
url = { version="1.7", features=["query_encoding"] }

# ssl support
native-tls = { version="0.2", optional = true }
openssl = { version="0.10", optional = true }
rustls = { version = "^0.15", optional = true }
serde_urlencoded = "0.7"
time = { version = "0.2.7", default-features = false, features = ["std"] }
url = "2.1"
open-ssl = { package = "openssl", version = "0.10", optional = true }
rust-tls = { package = "rustls", version = "0.18.0", optional = true }
tinyvec = { version = "1", features = ["alloc"] }

[dev-dependencies]
actix-http = { version = "0.1.0-alpha.4", features=["ssl", "brotli", "flate2-zlib"] }
actix-http-test = { version = "0.1.0-alpha.3", features=["ssl"] }
rand = "0.6"
env_logger = "0.6"
actix = "0.10.0"
actix-http = { version = "2.1.0", features = ["actors"] }
rand = "0.7"
env_logger = "0.8"
serde_derive = "1.0"
tokio-timer = "0.2.8"
brotli2 = "0.3.2"
flate2 = "1.0.2"
brotli = "3.3.3"
flate2 = "1.0.13"
criterion = "0.3"

[profile.release]
lto = true

@@ -116,12 +125,18 @@ opt-level = 3
codegen-units = 1

[patch.crates-io]
actix = { git = "https://github.com/actix/actix.git" }
actix-web = { path = "." }
actix-http = { path = "actix-http" }
actix-http-test = { path = "test-server" }
actix-web-codegen = { path = "actix-web-codegen" }
actix-web-actors = { path = "actix-web-actors" }
actix-session = { path = "actix-session" }
actix-files = { path = "actix-files" }
awc = { path = "awc" }
actix-multipart = { path = "actix-multipart" }
awc = { path = "awc" }

[[bench]]
name = "server"
harness = false

[[bench]]
name = "service"
harness = false
@@ -186,7 +186,7 @@
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2017-NOW Nikolay Kim
|
||||
Copyright 2017-NOW Actix Team
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
Copyright (c) 2017 Nikolay Kim
|
||||
Copyright (c) 2017 Actix Team
|
||||
|
||||
Permission is hereby granted, free of charge, to any
|
||||
person obtaining a copy of this software and associated
|
||||
|
||||
423 MIGRATION.md
@@ -1,8 +1,324 @@
|
||||
## 1.0
|
||||
## Unreleased
|
||||
|
||||
|
||||
## 3.0.0
|
||||
|
||||
* The return type for `ServiceRequest::app_data::<T>()` was changed from returning a `Data<T>` to
|
||||
simply a `T`. To access a `Data<T>` use `ServiceRequest::app_data::<Data<T>>()`.
|
||||
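For example, a minimal sketch of the new lookup (names here are illustrative; it assumes a `String` was registered with `App::data`, which stores it internally as `Data<String>`):

```rust
use actix_web::{dev::ServiceRequest, web::Data};

// Illustrative helper showing how a middleware would read the value in 3.0.
fn lookup(req: &ServiceRequest) {
    // `app_data::<T>()` now returns the stored `T` directly...
    let plain: Option<&String> = req.app_data::<String>();

    // ...so data registered via `App::data` must be requested as `Data<T>`.
    let wrapped: Option<&Data<String>> = req.app_data::<Data<String>>();

    let _ = (plain, wrapped);
}
```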
|
||||
* Cookie handling has been offloaded to the `cookie` crate:
|
||||
* `USERINFO_ENCODE_SET` is no longer exposed. Percent-encoding is still supported; check docs.
|
||||
* Some types now require lifetime parameters.
|
||||
|
||||
* The `time` crate was updated to `v0.2`, a major breaking change in that crate which affects
any `actix-web` method previously expecting a time v0.1 input.
|
||||
|
||||
* Explicitly setting a cookie's SameSite property to `SameSite::None` will now
result in `SameSite=None` being sent with the response's Set-Cookie header.
To create a cookie without a SameSite attribute, remove any calls that set same_site.
|
||||
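For example, a sketch using the re-exported cookie builder (the builder methods are assumed from the bundled `cookie` crate):

```rust
use actix_web::cookie::{Cookie, SameSite};

fn build_cookies() {
    // Explicitly opting in to `SameSite::None` now emits `SameSite=None`.
    let _cross_site = Cookie::build("session", "value")
        .same_site(SameSite::None)
        .finish();

    // Omitting `.same_site(...)` produces a cookie without a SameSite attribute.
    let _no_attribute = Cookie::build("session", "value").finish();
}
```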
|
||||
* Support for Actix actor messages was moved to the `actix-http` crate and is enabled
with the `actors` feature.
|
||||
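A Cargo manifest line enabling it might look like this (the version is assumed from the workspace manifest above):

```toml
[dependencies]
actix-http = { version = "2", features = ["actors"] }
```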
|
||||
* The `content_length` function was removed from actix-http.
You can set Content-Length by setting the response body as usual, or by calling the `no_chunking` function.
|
||||
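A minimal sketch using the plain actix-web response API (not the removed actix-http helper): setting a sized body is enough for the header to be emitted.

```rust
use actix_web::HttpResponse;

fn sized_response() -> HttpResponse {
    // A sized body lets the framework emit Content-Length automatically.
    HttpResponse::Ok().body("hello")
}
```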
|
||||
* `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a
|
||||
`u64` instead of a `usize`.
|
||||
|
||||
* Code that used `path.<index>` to access a `web::Path<(A, B, C)>`'s elements now needs to use
destructuring or `.into_inner()`. For example:
|
||||
|
||||
```rust
|
||||
// Previously:
|
||||
async fn some_route(path: web::Path<(String, String)>) -> String {
|
||||
format!("Hello, {} {}", path.0, path.1)
|
||||
}
|
||||
|
||||
// Now (this also worked before):
|
||||
async fn some_route(path: web::Path<(String, String)>) -> String {
|
||||
let (first_name, last_name) = path.into_inner();
|
||||
format!("Hello, {} {}", first_name, last_name)
|
||||
}
|
||||
// Or (this wasn't previously supported):
|
||||
async fn some_route(web::Path((first_name, last_name)): web::Path<(String, String)>) -> String {
|
||||
format!("Hello, {} {}", first_name, last_name)
|
||||
}
|
||||
```
|
||||
|
||||
* `middleware::NormalizePath` can now also be configured to trim trailing slashes instead of always keeping one.
It takes a `middleware::normalize::TrailingSlash` argument when constructed with `NormalizePath::new(...)`;
for an easier migration, replace `wrap(middleware::NormalizePath)` with `wrap(middleware::NormalizePath::new(TrailingSlash::MergeOnly))`.
|
||||
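A sketch of the drop-in replacement mentioned above (import paths assumed from the 3.0 middleware module):

```rust
use actix_web::{
    middleware::{normalize::TrailingSlash, NormalizePath},
    App,
};

fn configure() {
    let _app = App::new()
        // Previously: .wrap(middleware::NormalizePath)
        .wrap(NormalizePath::new(TrailingSlash::MergeOnly));
}
```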
|
||||
* `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`.
|
||||
|
||||
* `HttpServer::maxconnrate` is renamed to the more expressive `HttpServer::max_connection_rate`.
|
||||
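Together, a sketch of the renamed builder calls (the values are arbitrary):

```rust
use actix_web::{App, HttpServer};

fn configure() {
    let _server = HttpServer::new(|| App::new())
        .max_connections(25_000)     // was: `maxconn`
        .max_connection_rate(256);   // was: `maxconnrate`
}
```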
|
||||
|
||||
## 2.0.0
|
||||
|
||||
* `HttpServer::start()` renamed to `HttpServer::run()`. It is also possible to
`.await` the result of `run()`; in that case it awaits server exit.
|
||||
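A minimal sketch of the 2.0 startup pattern, using the 2.0-era `#[actix_rt::main]` macro (handler and address are placeholders):

```rust
use actix_web::{web, App, HttpResponse, HttpServer, Responder};

async fn index() -> impl Responder {
    HttpResponse::Ok().body("hello")
}

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    // `run()` replaces `start()`; awaiting the returned future waits for server exit.
    HttpServer::new(|| App::new().route("/", web::get().to(index)))
        .bind("127.0.0.1:8080")?
        .run()
        .await
}
```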
|
||||
* `App::register_data()` renamed to `App::app_data()` and accepts any type `T: 'static`.
|
||||
Stored data is available via `HttpRequest::app_data()` method at runtime.
|
||||
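A small sketch of the new registration and runtime lookup (the config type is made up for illustration):

```rust
use actix_web::{web, App, HttpRequest, HttpResponse, Responder};

struct AppConfig {
    greeting: &'static str,
}

async fn index(req: HttpRequest) -> impl Responder {
    // Anything registered with `App::app_data` is reachable here by type.
    let greeting = req
        .app_data::<AppConfig>()
        .map(|cfg| cfg.greeting)
        .unwrap_or("hello");

    HttpResponse::Ok().body(greeting)
}

fn build() {
    let _app = App::new()
        .app_data(AppConfig { greeting: "hi" })
        .route("/", web::get().to(index));
}
```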
|
||||
* Extractor configuration must be registered with `App::app_data()` instead of `App::data()`
|
||||
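For instance, a JSON payload limit would now be registered like this (a sketch; `JsonConfig` is the standard extractor configuration type):

```rust
use actix_web::{web, App};

fn build() {
    let _app = App::new()
        // Extractor configuration goes through `app_data`, not `data`.
        .app_data(web::JsonConfig::default().limit(4096));
}
```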
|
||||
* Sync handlers have been removed. The `.to_async()` method has been renamed to `.to()`;
replace `fn` with `async fn` to convert a sync handler to an async one.
|
||||
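For example, the mechanical change looks like this (handler body is a placeholder):

```rust
use actix_web::{web, App, HttpResponse, Responder};

// Before: `fn index() -> impl Responder { ... }` registered with `.to_async()`.
async fn index() -> impl Responder {
    HttpResponse::Ok().body("hello")
}

fn build() {
    let _app = App::new().route("/", web::get().to(index));
}
```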
|
||||
* `actix_http_test::TestServer` moved to the `actix_web::test` module. To start a
test server use the `test::start()` or `test::start_with_config()` methods.
|
||||
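A sketch of starting the relocated test server (handler and assertions are placeholders):

```rust
use actix_web::{test, web, App, HttpResponse, Responder};

async fn index() -> impl Responder {
    HttpResponse::Ok().body("ok")
}

#[actix_rt::test]
async fn starts_test_server() {
    // Was `actix_http_test::TestServer`.
    let srv = test::start(|| App::new().route("/", web::get().to(index)));

    let resp = srv.get("/").send().await.unwrap();
    assert!(resp.status().is_success());
}
```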
|
||||
* `ResponseError` trait has been refactored. `ResponseError::error_response()` renders
the HTTP response.
|
||||
|
||||
* Feature `rust-tls` renamed to `rustls`
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
actix-web = { version = "2.0.0", features = ["rust-tls"] }
|
||||
```
|
||||
|
||||
use
|
||||
|
||||
```rust
|
||||
actix-web = { version = "2.0.0", features = ["rustls"] }
|
||||
```
|
||||
|
||||
* Feature `ssl` renamed to `openssl`
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
actix-web = { version = "2.0.0", features = ["ssl"] }
|
||||
```
|
||||
|
||||
use
|
||||
|
||||
```rust
|
||||
actix-web = { version = "2.0.0", features = ["openssl"] }
|
||||
```
|
||||
* `Cors` builder now requires that you call `.finish()` to construct the middleware
|
||||
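A sketch with the relocated `actix-cors` crate (builder methods assumed from the 2.0-era API):

```rust
use actix_cors::Cors;
use actix_web::App;

fn build() {
    let cors = Cors::new()
        .allowed_origin("https://example.com")
        // `.finish()` is now required to turn the builder into middleware.
        .finish();

    let _app = App::new().wrap(cors);
}
```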
|
||||
## 1.0.1
|
||||
|
||||
* Cors middleware has been moved to `actix-cors` crate
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
use actix_web::middleware::cors::Cors;
|
||||
```
|
||||
|
||||
use
|
||||
|
||||
```rust
|
||||
use actix_cors::Cors;
|
||||
```
|
||||
|
||||
* Identity middleware has been moved to `actix-identity` crate
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
use actix_web::middleware::identity::{Identity, CookieIdentityPolicy, IdentityService};
|
||||
```
|
||||
|
||||
use
|
||||
|
||||
```rust
|
||||
use actix_identity::{Identity, CookieIdentityPolicy, IdentityService};
|
||||
```
|
||||
|
||||
|
||||
## 1.0.0
|
||||
|
||||
* Extractor configuration. In version 1.0 this is handled with the new `Data` mechanism for both setting and retrieving the configuration
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
|
||||
#[derive(Default)]
|
||||
struct ExtractorConfig {
|
||||
config: String,
|
||||
}
|
||||
|
||||
impl FromRequest for YourExtractor {
|
||||
type Config = ExtractorConfig;
|
||||
type Result = Result<YourExtractor, Error>;
|
||||
|
||||
fn from_request(req: &HttpRequest, cfg: &Self::Config) -> Self::Result {
|
||||
println!("use the config: {:?}", cfg.config);
|
||||
...
|
||||
}
|
||||
}
|
||||
|
||||
App::new().resource("/route_with_config", |r| {
|
||||
r.post().with_config(handler_fn, |cfg| {
|
||||
cfg.0.config = "test".to_string();
|
||||
})
|
||||
})
|
||||
|
||||
```
|
||||
|
||||
use the HttpRequest to get the configuration like any other `Data` with `req.app_data::<C>()` and set it with the `data()` method on the `resource`
|
||||
|
||||
```rust
|
||||
#[derive(Default)]
|
||||
struct ExtractorConfig {
|
||||
config: String,
|
||||
}
|
||||
|
||||
impl FromRequest for YourExtractor {
|
||||
type Error = Error;
|
||||
type Future = Result<Self, Self::Error>;
|
||||
type Config = ExtractorConfig;
|
||||
|
||||
fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future {
|
||||
let cfg = req.app_data::<ExtractorConfig>();
|
||||
println!("config data?: {:?}", cfg.unwrap().role);
|
||||
...
|
||||
}
|
||||
}
|
||||
|
||||
App::new().service(
|
||||
resource("/route_with_config")
|
||||
.data(ExtractorConfig {
|
||||
config: "test".to_string(),
|
||||
})
|
||||
.route(post().to(handler_fn)),
|
||||
)
|
||||
```
|
||||
|
||||
* Resource registration. 1.0 version uses generalized resource
|
||||
registration via `.service()` method.
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
App.new().resource("/welcome", |r| r.f(welcome))
|
||||
```
|
||||
|
||||
use App's or Scope's `.service()` method. The `.service()` method accepts an
object that implements the `HttpServiceFactory` trait. By default
actix-web provides `Resource` and `Scope` services.
|
||||
|
||||
```rust
|
||||
App::new().service(
    web::resource("/welcome")
        .route(web::get().to(welcome))
        .route(web::post().to(post_handler))
)
|
||||
```
|
||||
|
||||
* Scope registration.
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
let app = App::new().scope("/{project_id}", |scope| {
|
||||
scope
|
||||
.resource("/path1", |r| r.f(|_| HttpResponse::Ok()))
|
||||
.resource("/path2", |r| r.f(|_| HttpResponse::Ok()))
|
||||
.resource("/path3", |r| r.f(|_| HttpResponse::MethodNotAllowed()))
|
||||
});
|
||||
```
|
||||
|
||||
use `.service()` for registration and `web::scope()` as scope object factory.
|
||||
|
||||
```rust
|
||||
let app = App::new().service(
|
||||
web::scope("/{project_id}")
|
||||
.service(web::resource("/path1").to(|| HttpResponse::Ok()))
|
||||
.service(web::resource("/path2").to(|| HttpResponse::Ok()))
|
||||
.service(web::resource("/path3").to(|| HttpResponse::MethodNotAllowed()))
|
||||
);
|
||||
```
|
||||
|
||||
* `.with()`, `.with_async()` registration methods have been renamed to `.to()` and `.to_async()`.
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
App.new().resource("/welcome", |r| r.with(welcome))
|
||||
```
|
||||
|
||||
use `.to()` or `.to_async()` methods
|
||||
|
||||
```rust
|
||||
App::new().service(web::resource("/welcome").to(welcome))
|
||||
```
|
||||
|
||||
* Arguments are passed to handlers via extractors; multiple arguments are allowed
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
fn welcome((body, req): (Bytes, HttpRequest)) -> ... {
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
use multiple arguments
|
||||
|
||||
```rust
|
||||
fn welcome(body: Bytes, req: HttpRequest) -> ... {
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
* `.f()`, `.a()` and `.h()` handler registration methods have been removed.
Use `.to()` for handlers and `.to_async()` for async handlers. Handler functions
must use extractors.
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
App.new().resource("/welcome", |r| r.f(welcome))
|
||||
```
|
||||
|
||||
use App's `to()` or `to_async()` methods
|
||||
|
||||
```rust
|
||||
App::new().service(web::resource("/welcome").to(welcome))
|
||||
```
|
||||
|
||||
* `HttpRequest` does not provide access to request's payload stream.
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
fn index(req: &HttpRequest) -> Box<Future<Item=HttpResponse, Error=Error>> {
|
||||
req
|
||||
.payload()
|
||||
.from_err()
|
||||
.fold((), |_, chunk| {
|
||||
...
|
||||
})
|
||||
.map(|_| HttpResponse::Ok().finish())
|
||||
.responder()
|
||||
}
|
||||
```
|
||||
|
||||
use `Payload` extractor
|
||||
|
||||
```rust
|
||||
fn index(stream: web::Payload) -> impl Future<Item=HttpResponse, Error=Error> {
|
||||
stream
|
||||
.from_err()
|
||||
.fold((), |_, chunk| {
|
||||
...
|
||||
})
|
||||
.map(|_| HttpResponse::Ok().finish())
|
||||
}
|
||||
```
|
||||
|
||||
* `State` is now `Data`. You register Data during the App initialization process
|
||||
and then access it from handlers either using a Data extractor or using
|
||||
HttpRequest's api.
|
||||
|
||||
instead of
|
||||
|
||||
@@ -36,7 +352,7 @@ HttpRequest's api.
|
||||
```
|
||||
|
||||
|
||||
* AsyncResponder is deprecated.
|
||||
* AsyncResponder is removed, use `.to_async()` registration method and `impl Future<>` as result type.
|
||||
|
||||
instead of
|
||||
|
||||
@@ -52,6 +368,81 @@ HttpRequest's api.
|
||||
.. simply omit AsyncResponder and the corresponding responder() finish method
|
||||
|
||||
|
||||
* Middleware
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
let app = App::new()
|
||||
.middleware(middleware::Logger::default())
|
||||
```
|
||||
|
||||
use `.wrap()` method
|
||||
|
||||
```rust
|
||||
let app = App::new()
|
||||
.wrap(middleware::Logger::default())
|
||||
.route("/index.html", web::get().to(index));
|
||||
```
|
||||
|
||||
* `HttpRequest::body()`, `HttpRequest::urlencoded()`, `HttpRequest::json()`, `HttpRequest::multipart()`
methods have been removed. Use the `Bytes`, `String`, `Form`, `Json`, `Multipart` extractors instead.
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
fn index(req: &HttpRequest) -> Responder {
|
||||
req.body()
|
||||
.and_then(|body| {
|
||||
...
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
use
|
||||
|
||||
```rust
|
||||
fn index(body: Bytes) -> Responder {
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
* `actix_web::server` module has been removed. To start an HTTP server use the `actix_web::HttpServer` type
|
||||
|
||||
* StaticFiles and NamedFile have been moved to a separate crate.
|
||||
|
||||
instead of `use actix_web::fs::StaticFiles`
|
||||
|
||||
use `use actix_files::Files`
|
||||
|
||||
instead of `use actix_web::fs::NamedFile`
|
||||
|
||||
use `use actix_files::NamedFile`
|
||||
|
||||
* Multipart has been moved to a separate crate.
|
||||
|
||||
instead of `use actix_web::multipart::Multipart`
|
||||
|
||||
use `use actix_multipart::Multipart`
|
||||
|
||||
* Response compression is not enabled by default.
|
||||
To enable, use `Compress` middleware, `App::new().wrap(Compress::default())`.
|
||||
|
||||
* Session middleware moved to actix-session crate
|
||||
|
||||
* Actor support has been moved to the `actix-web-actors` crate
|
||||
|
||||
* Custom Error
|
||||
|
||||
Instead of the `error_response` method alone, `ResponseError` now provides two methods: `error_response` and `render_response`. `error_response` creates the error response and `render_response` returns the error response to the caller.
|
||||
|
||||
The simplest migration from 0.7 to 1.0 is to add the method below to your custom `ResponseError` implementation:
|
||||
|
||||
```rust
|
||||
fn render_response(&self) -> HttpResponse {
|
||||
self.error_response()
|
||||
}
|
||||
```
|
||||
|
||||
## 0.7.15
|
||||
|
||||
@@ -93,9 +484,9 @@ HttpRequest's api.
|
||||
|
||||
* `HttpRequest` does not implement `Stream` anymore. If you need to read request payload
|
||||
use `HttpMessage::payload()` method.
|
||||
|
||||
|
||||
instead of
|
||||
|
||||
|
||||
```rust
|
||||
fn index(req: HttpRequest) -> impl Responder {
|
||||
req
|
||||
@@ -121,8 +512,8 @@ HttpRequest's api.
|
||||
trait uses `&HttpRequest` instead of `&mut HttpRequest`.
|
||||
|
||||
* Removed `Route::with2()` and `Route::with3()`; use a tuple of extractors instead.
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
fn index(query: Query<..>, info: Json<MyStruct>) -> impl Responder {}
|
||||
@@ -138,7 +529,7 @@ HttpRequest's api.
|
||||
|
||||
* `Handler::handle()` accepts reference to `HttpRequest<_>` instead of value
|
||||
|
||||
* Removed deprecated `HttpServer::threads()`, use
|
||||
[HttpServer::workers()](https://actix.rs/actix-web/actix_web/server/struct.HttpServer.html#method.workers) instead.
|
||||
|
||||
* Renamed `client::ClientConnectorError::Connector` to
|
||||
@@ -147,7 +538,7 @@ HttpRequest's api.
|
||||
* `Route::with()` does not return `ExtractorConfig`; to configure an
extractor use `Route::with_config()`
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
fn main() {
|
||||
@@ -158,11 +549,11 @@ HttpRequest's api.
|
||||
});
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
use
|
||||
|
||||
```rust
|
||||
|
||||
|
||||
fn main() {
|
||||
let app = App::new().resource("/index.html", |r| {
|
||||
r.method(http::Method::GET)
|
||||
@@ -192,12 +583,12 @@ HttpRequest's api.
|
||||
* `HttpRequest::extensions()` returns a read-only reference to the request's extensions;
`HttpRequest::extensions_mut()` returns a mutable reference.
|
||||
|
||||
* Instead of
|
||||
|
||||
`use actix_web::middleware::{
|
||||
CookieSessionBackend, CookieSessionError, RequestSession,
|
||||
Session, SessionBackend, SessionImpl, SessionStorage};`
|
||||
|
||||
|
||||
use `actix_web::middleware::session`
|
||||
|
||||
`use actix_web::middleware::session::{CookieSessionBackend, CookieSessionError,
|
||||
|
||||
111 README.md
@@ -1,80 +1,111 @@
|
||||
# Actix web [](https://travis-ci.org/actix/actix-web) [](https://codecov.io/gh/actix/actix-web) [](https://crates.io/crates/actix-web) [](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
||||
<div align="center">
|
||||
<h1>Actix web</h1>
|
||||
<p>
|
||||
<strong>Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust</strong>
|
||||
</p>
|
||||
<p>
|
||||
|
||||
Actix web is a simple, pragmatic and extremely fast web framework for Rust.
|
||||
[](https://crates.io/crates/actix-web)
|
||||
[](https://docs.rs/actix-web/3.3.3)
|
||||
[](https://blog.rust-lang.org/2020/03/12/Rust-1.42.html)
|
||||

|
||||
[](https://deps.rs/crate/actix-web/3.3.3)
|
||||
<br />
|
||||
[](https://travis-ci.org/actix/actix-web)
|
||||
[](https://codecov.io/gh/actix/actix-web)
|
||||
[](https://crates.io/crates/actix-web)
|
||||
[](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
||||
[](https://discord.gg/NWpN5mmg3x)
|
||||
|
||||
* Supported *HTTP/1.x* and *HTTP/2.0* protocols
|
||||
</p>
|
||||
</div>
|
||||
|
||||
## Features
|
||||
|
||||
* Supports *HTTP/1.x* and *HTTP/2*
|
||||
* Streaming and pipelining
|
||||
* Keep-alive and slow requests handling
|
||||
* Client/server [WebSockets](https://actix.rs/docs/websockets/) support
|
||||
* Transparent content compression/decompression (br, gzip, deflate)
|
||||
* Configurable [request routing](https://actix.rs/docs/url-dispatch/)
|
||||
* Powerful [request routing](https://actix.rs/docs/url-dispatch/)
|
||||
* Multipart streams
|
||||
* Static assets
|
||||
* SSL support with OpenSSL or Rustls
|
||||
* Middlewares ([Logger, Session, CORS, CSRF, etc](https://actix.rs/docs/middleware/))
|
||||
* Includes an asynchronous [HTTP client](https://actix.rs/actix-web/actix_web/client/index.html)
|
||||
* SSL support using OpenSSL or Rustls
|
||||
* Middlewares ([Logger, Session, CORS, etc](https://actix.rs/docs/middleware/))
|
||||
* Includes an async [HTTP client](https://actix.rs/actix-web/actix_web/client/index.html)
|
||||
* Supports [Actix actor framework](https://github.com/actix/actix)
|
||||
* Runs on stable Rust 1.42+
|
||||
|
||||
## Documentation & community resources
|
||||
## Documentation
|
||||
|
||||
* [User Guide](https://actix.rs/docs/)
|
||||
* [API Documentation (Development)](https://actix.rs/actix-web/actix_web/)
|
||||
* [API Documentation (0.7 Release)](https://docs.rs/actix-web/0.7.19/actix_web/)
|
||||
* [Chat on gitter](https://gitter.im/actix/actix)
|
||||
* Cargo package: [actix-web](https://crates.io/crates/actix-web)
|
||||
* Minimum supported Rust version: 1.32 or later
|
||||
* [Website & User Guide](https://actix.rs)
|
||||
* [Examples Repository](https://github.com/actix/examples)
|
||||
* [API Documentation](https://docs.rs/actix-web)
|
||||
* [API Documentation (master branch)](https://actix.rs/actix-web/actix_web)
|
||||
|
||||
## Example
|
||||
|
||||
```rust
|
||||
use actix_web::{web, App, HttpServer, Responder};
|
||||
Dependencies:
|
||||
|
||||
fn index(info: web::Path<(u32, String)>) -> impl Responder {
|
||||
format!("Hello {}! id:{}", info.1, info.0)
|
||||
```toml
|
||||
[dependencies]
|
||||
actix-web = "3"
|
||||
```
|
||||
|
||||
Code:
|
||||
|
||||
```rust
|
||||
use actix_web::{get, web, App, HttpServer, Responder};
|
||||
|
||||
#[get("/{id}/{name}/index.html")]
|
||||
async fn index(web::Path((id, name)): web::Path<(u32, String)>) -> impl Responder {
|
||||
format!("Hello {}! id:{}", name, id)
|
||||
}
|
||||
|
||||
fn main() -> std::io::Result<()> {
|
||||
HttpServer::new(
|
||||
|| App::new().service(
|
||||
web::resource("/{id}/{name}/index.html")
|
||||
.route(web::get().to(index))))
|
||||
#[actix_web::main]
|
||||
async fn main() -> std::io::Result<()> {
|
||||
HttpServer::new(|| App::new().service(index))
|
||||
.bind("127.0.0.1:8080")?
|
||||
.run()
|
||||
.await
|
||||
}
|
||||
```
|
||||
|
||||
### More examples
|
||||
|
||||
* [Basics](https://github.com/actix/examples/tree/master/basics/)
|
||||
* [Stateful](https://github.com/actix/examples/tree/master/state/)
|
||||
* [Multipart streams](https://github.com/actix/examples/tree/master/multipart/)
|
||||
* [Simple websocket](https://github.com/actix/examples/tree/master/websocket/)
|
||||
* [Tera](https://github.com/actix/examples/tree/master/template_tera/) /
|
||||
[Askama](https://github.com/actix/examples/tree/master/template_askama/) templates
|
||||
* [Diesel integration](https://github.com/actix/examples/tree/master/diesel/)
|
||||
* [r2d2](https://github.com/actix/examples/tree/master/r2d2/)
|
||||
* [SSL / HTTP/2.0](https://github.com/actix/examples/tree/master/tls/)
|
||||
* [Tcp/Websocket chat](https://github.com/actix/examples/tree/master/websocket-chat/)
|
||||
* [Json](https://github.com/actix/examples/tree/master/json/)
|
||||
* [Basic Setup](https://github.com/actix/examples/tree/master/basics/)
|
||||
* [Application State](https://github.com/actix/examples/tree/master/state/)
|
||||
* [JSON Handling](https://github.com/actix/examples/tree/master/json/)
|
||||
* [Multipart Streams](https://github.com/actix/examples/tree/master/multipart/)
|
||||
* [Diesel Integration](https://github.com/actix/examples/tree/master/diesel/)
|
||||
* [r2d2 Integration](https://github.com/actix/examples/tree/master/r2d2/)
|
||||
* [Simple WebSocket](https://github.com/actix/examples/tree/master/websocket/)
|
||||
* [Tera Templates](https://github.com/actix/examples/tree/master/template_tera/)
|
||||
* [Askama Templates](https://github.com/actix/examples/tree/master/template_askama/)
|
||||
* [HTTPS using Rustls](https://github.com/actix/examples/tree/master/rustls/)
|
||||
* [HTTPS using OpenSSL](https://github.com/actix/examples/tree/master/openssl/)
|
||||
* [WebSocket Chat](https://github.com/actix/examples/tree/master/websocket-chat/)
|
||||
|
||||
You may consider checking out
|
||||
[this directory](https://github.com/actix/examples/tree/master/) for more examples.
|
||||
|
||||
## Benchmarks
|
||||
|
||||
* [TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r16&hw=ph&test=plaintext)
|
||||
One of the fastest web frameworks available according to the
|
||||
[TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r19).
|
||||
|
||||
## License
|
||||
|
||||
This project is licensed under either of
|
||||
|
||||
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
|
||||
* MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
|
||||
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
[http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
|
||||
* MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
[http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
|
||||
|
||||
at your option.
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
Contribution to the actix-web crate is organized under the terms of the
|
||||
Contributor Covenant, the maintainer of actix-web, @fafhrd91, promises to
|
||||
intervene to uphold that code of conduct.
|
||||
Contribution to the actix-web crate is organized under the terms of the Contributor Covenant; the
maintainers of Actix web promise to intervene to uphold that code of conduct.
|
||||
|
||||
@@ -1,15 +1,102 @@
|
||||
# Changes
|
||||
|
||||
## [0.1.0-alpha.4] - 2019-04-08
|
||||
## Unreleased - 2020-xx-xx
|
||||
|
||||
|
||||
## 0.4.1 - 2020-11-24
|
||||
* Clarify order of parameters in `Files::new` and improve docs.
|
||||
|
||||
|
||||
## 0.4.0 - 2020-10-06
|
||||
* Add `Files::prefer_utf8` option that adds UTF-8 charset on certain response types. [#1714]
|
||||
|
||||
[#1714]: https://github.com/actix/actix-web/pull/1714
|
||||
|
||||
|
||||
## 0.3.0 - 2020-09-11
|
||||
* No significant changes from 0.3.0-beta.1.
|
||||
|
||||
|
||||
## 0.3.0-beta.1 - 2020-07-15
|
||||
* Update `v_htmlescape` to 0.10
|
||||
* Update `actix-web` and `actix-http` dependencies to beta.1
|
||||
|
||||
|
||||
## 0.3.0-alpha.1 - 2020-05-23
|
||||
* Update `actix-web` and `actix-http` dependencies to alpha
|
||||
* Fix some typos in the docs
|
||||
* Bump minimum supported Rust version to 1.40
|
||||
* Support sending Content-Length when Content-Range is specified [#1384]
|
||||
|
||||
[#1384]: https://github.com/actix/actix-web/pull/1384
|
||||
|
||||
|
||||
## 0.2.1 - 2019-12-22
|
||||
* Use the same format for file URLs regardless of platforms
|
||||
|
||||
|
||||
## 0.2.0 - 2019-12-20
|
||||
* Fix BodyEncoding trait import #1220
|
||||
|
||||
|
||||
## 0.2.0-alpha.1 - 2019-12-07
|
||||
* Migrate to `std::future`
|
||||
|
||||
|
||||
## 0.1.7 - 2019-11-06
|
||||
* Add an additional `filename*` param in the `Content-Disposition` header of
|
||||
`actix_files::NamedFile` to be more compatible. (#1151)
|
||||
|
||||
## 0.1.6 - 2019-10-14
|
||||
* Add option to redirect to a slash-ended path `Files` #1132
|
||||
|
||||
|
||||
## 0.1.5 - 2019-10-08
|
||||
* Bump up `mime_guess` crate version to 2.0.1
|
||||
* Bump up `percent-encoding` crate version to 2.1
|
||||
* Allow user defined request guards for `Files` #1113
|
||||
|
||||
|
||||
## 0.1.4 - 2019-07-20
|
||||
* Allow to disable `Content-Disposition` header #686
|
||||
|
||||
|
||||
## 0.1.3 - 2019-06-28
|
||||
* Do not set `Content-Length` header, let actix-http set it #930
|
||||
|
||||
|
||||
## 0.1.2 - 2019-06-13
|
||||
* Content-Length is 0 for NamedFile HEAD request #914
|
||||
* Fix ring dependency from actix-web default features for #741
|
||||
|
||||
|
||||
## 0.1.1 - 2019-06-01
|
||||
* Static files are incorrectly served as both chunked and with length #812
|
||||
|
||||
|
||||
## 0.1.0 - 2019-05-25
|
||||
* NamedFile last-modified check always fails due to nano-seconds in file modified date #820
|
||||
|
||||
|
||||
## 0.1.0-beta.4 - 2019-05-12
|
||||
* Update actix-web to beta.4
|
||||
|
||||
|
||||
## 0.1.0-beta.1 - 2019-04-20
|
||||
* Update actix-web to beta.1
|
||||
|
||||
|
||||
## 0.1.0-alpha.6 - 2019-04-14
|
||||
* Update actix-web to alpha6
|
||||
|
||||
|
||||
## 0.1.0-alpha.4 - 2019-04-08
|
||||
* Update actix-web to alpha4
|
||||
|
||||
|
||||
## [0.1.0-alpha.2] - 2019-04-02
|
||||
|
||||
## 0.1.0-alpha.2 - 2019-04-02
|
||||
* Add default handler support
|
||||
|
||||
|
||||
## [0.1.0-alpha.1] - 2019-03-28
|
||||
|
||||
## 0.1.0-alpha.1 - 2019-03-28
|
||||
* Initial impl
|
||||
|
||||
@@ -1,34 +1,35 @@
|
||||
[package]
|
||||
name = "actix-files"
|
||||
version = "0.1.0-alpha.4"
|
||||
version = "0.4.1"
|
||||
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
|
||||
description = "Static files support for actix web."
|
||||
description = "Static file serving for Actix Web"
|
||||
readme = "README.md"
|
||||
keywords = ["actix", "http", "async", "futures"]
|
||||
homepage = "https://actix.rs"
|
||||
repository = "https://github.com/actix/actix-web.git"
|
||||
documentation = "https://docs.rs/actix-files/"
|
||||
categories = ["asynchronous", "web-programming::http-server"]
|
||||
license = "MIT/Apache-2.0"
|
||||
license = "MIT OR Apache-2.0"
|
||||
edition = "2018"
|
||||
workspace = ".."
|
||||
|
||||
[lib]
|
||||
name = "actix_files"
|
||||
path = "src/lib.rs"
|
||||
|
||||
[dependencies]
|
||||
actix-web = "1.0.0-alpha.4"
|
||||
actix-service = "0.3.4"
|
||||
actix-web = { version = "3.0.0", default-features = false }
|
||||
actix-service = "1.0.6"
|
||||
bitflags = "1"
|
||||
bytes = "0.4"
|
||||
futures = "0.1.25"
|
||||
derive_more = "0.14"
|
||||
bytes = "0.5.3"
|
||||
futures-core = { version = "0.3.7", default-features = false }
|
||||
futures-util = { version = "0.3.7", default-features = false }
|
||||
derive_more = "0.99.2"
|
||||
log = "0.4"
|
||||
mime = "0.3"
|
||||
mime_guess = "2.0.0-alpha"
|
||||
percent-encoding = "1.0"
|
||||
v_htmlescape = "0.4"
|
||||
mime_guess = "2.0.1"
|
||||
percent-encoding = "2.1"
|
||||
v_htmlescape = "0.11"
|
||||
|
||||
[dev-dependencies]
|
||||
actix-web = { version = "1.0.0-alpha.4", features=["ssl"] }
|
||||
actix-rt = "1.0.0"
|
||||
actix-web = { version = "3.0.0", features = ["openssl"] }
|
||||
|
||||
1 actix-files/LICENSE-APACHE Symbolic link
@@ -0,0 +1 @@
|
||||
../LICENSE-APACHE
|
||||
1 actix-files/LICENSE-MIT Symbolic link
@@ -0,0 +1 @@
|
||||
../LICENSE-MIT
|
||||
@@ -1 +1,19 @@
|
||||
# Static files support for actix web [](https://travis-ci.org/actix/actix-web) [](https://codecov.io/gh/actix/actix-web) [](https://crates.io/crates/actix-files) [](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
||||
# actix-files
|
||||
|
||||
> Static file serving for Actix Web
|
||||
|
||||
[](https://crates.io/crates/actix-files)
|
||||
[](https://docs.rs/actix-files/0.4.1)
|
||||
[](https://blog.rust-lang.org/2020/03/12/Rust-1.42.html)
|
||||

|
||||
<br />
|
||||
[](https://deps.rs/crate/actix-files/0.4.1)
|
||||
[](https://crates.io/crates/actix-files)
|
||||
[](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
||||
|
||||
## Documentation & Resources
|
||||
|
||||
- [API Documentation](https://docs.rs/actix-files/)
|
||||
- [Example Project](https://github.com/actix/examples/tree/master/static_index)
|
||||
- [Chat on Gitter](https://gitter.im/actix/actix-web)
|
||||
- Minimum supported Rust version: 1.42 or later
|
||||
|
||||
94 actix-files/src/chunked.rs Normal file
@@ -0,0 +1,94 @@
|
||||
use std::{
|
||||
cmp, fmt,
|
||||
fs::File,
|
||||
future::Future,
|
||||
io::{self, Read, Seek},
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use actix_web::{
|
||||
error::{BlockingError, Error},
|
||||
web,
|
||||
};
|
||||
use bytes::Bytes;
|
||||
use futures_core::{ready, Stream};
|
||||
use futures_util::future::{FutureExt, LocalBoxFuture};
|
||||
|
||||
use crate::handle_error;
|
||||
|
||||
type ChunkedBoxFuture =
|
||||
LocalBoxFuture<'static, Result<(File, Bytes), BlockingError<io::Error>>>;
|
||||
|
||||
#[doc(hidden)]
|
||||
/// A helper created from a `std::fs::File` which reads the file
|
||||
/// chunk-by-chunk on a `ThreadPool`.
|
||||
pub struct ChunkedReadFile {
|
||||
pub(crate) size: u64,
|
||||
pub(crate) offset: u64,
|
||||
pub(crate) file: Option<File>,
|
||||
pub(crate) fut: Option<ChunkedBoxFuture>,
|
||||
pub(crate) counter: u64,
|
||||
}
|
||||
|
||||
impl fmt::Debug for ChunkedReadFile {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.write_str("ChunkedReadFile")
|
||||
}
|
||||
}
|
||||
|
||||
impl Stream for ChunkedReadFile {
|
||||
type Item = Result<Bytes, Error>;
|
||||
|
||||
fn poll_next(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Self::Item>> {
|
||||
if let Some(ref mut fut) = self.fut {
|
||||
return match ready!(Pin::new(fut).poll(cx)) {
|
||||
Ok((file, bytes)) => {
|
||||
self.fut.take();
|
||||
self.file = Some(file);
|
||||
|
||||
self.offset += bytes.len() as u64;
|
||||
self.counter += bytes.len() as u64;
|
||||
|
||||
Poll::Ready(Some(Ok(bytes)))
|
||||
}
|
||||
Err(e) => Poll::Ready(Some(Err(handle_error(e)))),
|
||||
};
|
||||
}
|
||||
|
||||
let size = self.size;
|
||||
let offset = self.offset;
|
||||
let counter = self.counter;
|
||||
|
||||
if size == counter {
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
let mut file = self.file.take().expect("Use after completion");
|
||||
|
||||
self.fut = Some(
|
||||
web::block(move || {
|
||||
let max_bytes =
|
||||
cmp::min(size.saturating_sub(counter), 65_536) as usize;
|
||||
|
||||
let mut buf = Vec::with_capacity(max_bytes);
|
||||
file.seek(io::SeekFrom::Start(offset))?;
|
||||
|
||||
let n_bytes =
|
||||
file.by_ref().take(max_bytes as u64).read_to_end(&mut buf)?;
|
||||
|
||||
if n_bytes == 0 {
|
||||
return Err(io::ErrorKind::UnexpectedEof.into());
|
||||
}
|
||||
|
||||
Ok((file, Bytes::from(buf)))
|
||||
})
|
||||
.boxed_local(),
|
||||
);
|
||||
|
||||
self.poll_next(cx)
|
||||
}
|
||||
}
|
||||
}
|
||||
114 actix-files/src/directory.rs Normal file
@@ -0,0 +1,114 @@
|
||||
use std::{fmt::Write, fs::DirEntry, io, path::Path, path::PathBuf};
|
||||
|
||||
use actix_web::{dev::ServiceResponse, HttpRequest, HttpResponse};
|
||||
use percent_encoding::{utf8_percent_encode, CONTROLS};
|
||||
use v_htmlescape::escape as escape_html_entity;
|
||||
|
||||
/// A directory; responds with the generated directory listing.
|
||||
#[derive(Debug)]
|
||||
pub struct Directory {
|
||||
/// Base directory.
|
||||
pub base: PathBuf,
|
||||
|
||||
/// Path of subdirectory to generate listing for.
|
||||
pub path: PathBuf,
|
||||
}
|
||||
|
||||
impl Directory {
|
||||
/// Create a new directory
|
||||
pub fn new(base: PathBuf, path: PathBuf) -> Directory {
|
||||
Directory { base, path }
|
||||
}
|
||||
|
||||
/// Is this entry visible from this directory?
|
||||
pub fn is_visible(&self, entry: &io::Result<DirEntry>) -> bool {
|
||||
if let Ok(ref entry) = *entry {
|
||||
if let Some(name) = entry.file_name().to_str() {
|
||||
if name.starts_with('.') {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if let Ok(ref md) = entry.metadata() {
|
||||
let ft = md.file_type();
|
||||
return ft.is_dir() || ft.is_file() || ft.is_symlink();
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) type DirectoryRenderer =
|
||||
dyn Fn(&Directory, &HttpRequest) -> Result<ServiceResponse, io::Error>;
|
||||
|
||||
// show file url as relative to static path
|
||||
macro_rules! encode_file_url {
|
||||
($path:ident) => {
|
||||
utf8_percent_encode(&$path, CONTROLS)
|
||||
};
|
||||
}
|
||||
|
||||
// " -- " & -- & ' -- ' < -- < > -- > / -- /
|
||||
macro_rules! encode_file_name {
|
||||
($entry:ident) => {
|
||||
escape_html_entity(&$entry.file_name().to_string_lossy())
|
||||
};
|
||||
}
|
||||
|
||||
pub(crate) fn directory_listing(
|
||||
dir: &Directory,
|
||||
req: &HttpRequest,
|
||||
) -> Result<ServiceResponse, io::Error> {
|
||||
let index_of = format!("Index of {}", req.path());
|
||||
let mut body = String::new();
|
||||
let base = Path::new(req.path());
|
||||
|
||||
for entry in dir.path.read_dir()? {
|
||||
if dir.is_visible(&entry) {
|
||||
let entry = entry.unwrap();
|
||||
let p = match entry.path().strip_prefix(&dir.path) {
|
||||
Ok(p) if cfg!(windows) => {
|
||||
base.join(p).to_string_lossy().replace("\\", "/")
|
||||
}
|
||||
Ok(p) => base.join(p).to_string_lossy().into_owned(),
|
||||
Err(_) => continue,
|
||||
};
|
||||
|
||||
// if file is a directory, add '/' to the end of the name
|
||||
if let Ok(metadata) = entry.metadata() {
|
||||
if metadata.is_dir() {
|
||||
let _ = write!(
|
||||
body,
|
||||
"<li><a href=\"{}\">{}/</a></li>",
|
||||
encode_file_url!(p),
|
||||
encode_file_name!(entry),
|
||||
);
|
||||
} else {
|
||||
let _ = write!(
|
||||
body,
|
||||
"<li><a href=\"{}\">{}</a></li>",
|
||||
encode_file_url!(p),
|
||||
encode_file_name!(entry),
|
||||
);
|
||||
}
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let html = format!(
|
||||
"<html>\
|
||||
<head><title>{}</title></head>\
|
||||
<body><h1>{}</h1>\
|
||||
<ul>\
|
||||
{}\
|
||||
</ul></body>\n</html>",
|
||||
index_of, index_of, body
|
||||
);
|
||||
Ok(ServiceResponse::new(
|
||||
req.clone(),
|
||||
HttpResponse::Ok()
|
||||
.content_type("text/html; charset=utf-8")
|
||||
.body(html),
|
||||
))
|
||||
}
|
||||
52 actix-files/src/encoding.rs Normal file
@@ -0,0 +1,52 @@
|
||||
use mime::Mime;
|
||||
|
||||
/// Transforms MIME `text/*` types into their UTF-8 equivalent, if supported.
|
||||
///
|
||||
/// MIME types that are converted
|
||||
/// - application/javascript
|
||||
/// - text/html
|
||||
/// - text/css
|
||||
/// - text/plain
|
||||
/// - text/csv
|
||||
/// - text/tab-separated-values
|
||||
pub(crate) fn equiv_utf8_text(ct: Mime) -> Mime {
|
||||
// use (roughly) order of file-type popularity for a web server
|
||||
|
||||
if ct == mime::APPLICATION_JAVASCRIPT {
|
||||
return mime::APPLICATION_JAVASCRIPT_UTF_8;
|
||||
}
|
||||
|
||||
if ct == mime::TEXT_HTML {
|
||||
return mime::TEXT_HTML_UTF_8;
|
||||
}
|
||||
|
||||
if ct == mime::TEXT_CSS {
|
||||
return mime::TEXT_CSS_UTF_8;
|
||||
}
|
||||
|
||||
if ct == mime::TEXT_PLAIN {
|
||||
return mime::TEXT_PLAIN_UTF_8;
|
||||
}
|
||||
|
||||
if ct == mime::TEXT_CSV {
|
||||
return mime::TEXT_CSV_UTF_8;
|
||||
}
|
||||
|
||||
if ct == mime::TEXT_TAB_SEPARATED_VALUES {
|
||||
return mime::TEXT_TAB_SEPARATED_VALUES_UTF_8;
|
||||
}
|
||||
|
||||
ct
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_equiv_utf8_text() {
|
||||
assert_eq!(equiv_utf8_text(mime::TEXT_PLAIN), mime::TEXT_PLAIN_UTF_8);
|
||||
assert_eq!(equiv_utf8_text(mime::TEXT_XML), mime::TEXT_XML);
|
||||
assert_eq!(equiv_utf8_text(mime::IMAGE_PNG), mime::IMAGE_PNG);
|
||||
}
|
||||
}
|
||||
@@ -5,6 +5,7 @@ use derive_more::Display;
|
||||
#[derive(Display, Debug, PartialEq)]
|
||||
pub enum FilesError {
|
||||
/// Path is not a directory
|
||||
#[allow(dead_code)]
|
||||
#[display(fmt = "Path is not a directory. Unable to serve static files")]
|
||||
IsNotDirectory,
|
||||
|
||||
@@ -35,7 +36,7 @@ pub enum UriSegmentError {
|
||||
|
||||
/// Return `BadRequest` for `UriSegmentError`
|
||||
impl ResponseError for UriSegmentError {
|
||||
fn error_response(&self) -> HttpResponse {
|
||||
HttpResponse::new(StatusCode::BAD_REQUEST)
|
||||
fn status_code(&self) -> StatusCode {
|
||||
StatusCode::BAD_REQUEST
|
||||
}
|
||||
}
|
||||
|
||||
271 actix-files/src/files.rs Normal file
@@ -0,0 +1,271 @@
|
||||
use std::{cell::RefCell, fmt, io, path::PathBuf, rc::Rc};
|
||||
|
||||
use actix_service::{boxed, IntoServiceFactory, ServiceFactory};
|
||||
use actix_web::{
|
||||
dev::{
|
||||
AppService, HttpServiceFactory, ResourceDef, ServiceRequest, ServiceResponse,
|
||||
},
|
||||
error::Error,
|
||||
guard::Guard,
|
||||
http::header::DispositionType,
|
||||
HttpRequest,
|
||||
};
|
||||
use futures_util::future::{ok, FutureExt, LocalBoxFuture};
|
||||
|
||||
use crate::{
|
||||
directory_listing, named, Directory, DirectoryRenderer, FilesService,
|
||||
HttpNewService, MimeOverride,
|
||||
};
|
||||
|
||||
/// Static files handling service.
|
||||
///
|
||||
/// `Files` service must be registered with `App::service()` method.
|
||||
///
|
||||
/// ```rust
|
||||
/// use actix_web::App;
|
||||
/// use actix_files::Files;
|
||||
///
|
||||
/// let app = App::new()
|
||||
/// .service(Files::new("/static", "."));
|
||||
/// ```
|
||||
pub struct Files {
|
||||
path: String,
|
||||
directory: PathBuf,
|
||||
index: Option<String>,
|
||||
show_index: bool,
|
||||
redirect_to_slash: bool,
|
||||
default: Rc<RefCell<Option<Rc<HttpNewService>>>>,
|
||||
renderer: Rc<DirectoryRenderer>,
|
||||
mime_override: Option<Rc<MimeOverride>>,
|
||||
file_flags: named::Flags,
|
||||
guards: Option<Rc<dyn Guard>>,
|
||||
}
|
||||
|
||||
impl fmt::Debug for Files {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.write_str("Files")
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for Files {
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
directory: self.directory.clone(),
|
||||
index: self.index.clone(),
|
||||
show_index: self.show_index,
|
||||
redirect_to_slash: self.redirect_to_slash,
|
||||
default: self.default.clone(),
|
||||
renderer: self.renderer.clone(),
|
||||
file_flags: self.file_flags,
|
||||
path: self.path.clone(),
|
||||
mime_override: self.mime_override.clone(),
|
||||
guards: self.guards.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Files {
|
||||
/// Create new `Files` instance for a specified base directory.
|
||||
///
|
||||
/// # Argument Order
|
||||
/// The first argument (`mount_path`) is the root URL at which the static files are served.
|
||||
/// For example, `/assets` will serve files at `example.com/assets/...`.
|
||||
///
|
||||
/// The second argument (`serve_from`) is the location on disk at which files are loaded.
|
||||
/// This can be a relative path. For example, `./` would serve files from the current
|
||||
/// working directory.
|
||||
///
|
||||
/// # Implementation Notes
|
||||
/// If the mount path is set as the root path `/`, services registered after this one will
|
||||
/// be inaccessible. Register more specific handlers and services first.
|
||||
///
|
||||
/// `Files` uses a threadpool for blocking filesystem operations. By default, the pool uses a
|
||||
/// number of threads equal to 5x the number of available logical CPUs. Pool size can be changed
|
||||
/// by setting ACTIX_THREADPOOL environment variable.
|
||||
pub fn new<T: Into<PathBuf>>(mount_path: &str, serve_from: T) -> Files {
|
||||
let orig_dir = serve_from.into();
|
||||
let dir = match orig_dir.canonicalize() {
|
||||
Ok(canon_dir) => canon_dir,
|
||||
Err(_) => {
|
||||
log::error!("Specified path is not a directory: {:?}", orig_dir);
|
||||
PathBuf::new()
|
||||
}
|
||||
};
|
||||
|
||||
Files {
|
||||
path: mount_path.to_owned(),
|
||||
directory: dir,
|
||||
index: None,
|
||||
show_index: false,
|
||||
redirect_to_slash: false,
|
||||
default: Rc::new(RefCell::new(None)),
|
||||
renderer: Rc::new(directory_listing),
|
||||
mime_override: None,
|
||||
file_flags: named::Flags::default(),
|
||||
guards: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Show files listing for directories.
|
||||
///
|
||||
/// By default show files listing is disabled.
|
||||
pub fn show_files_listing(mut self) -> Self {
|
||||
self.show_index = true;
|
||||
self
|
||||
}
|
||||
|
||||
/// Redirects to a slash-ended path when browsing a directory.
|
||||
///
|
||||
/// By default never redirect.
|
||||
pub fn redirect_to_slash_directory(mut self) -> Self {
|
||||
self.redirect_to_slash = true;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set custom directory renderer
|
||||
pub fn files_listing_renderer<F>(mut self, f: F) -> Self
|
||||
where
|
||||
for<'r, 's> F: Fn(&'r Directory, &'s HttpRequest) -> Result<ServiceResponse, io::Error>
|
||||
+ 'static,
|
||||
{
|
||||
self.renderer = Rc::new(f);
|
||||
self
|
||||
}
|
||||
|
||||
/// Specifies mime override callback
|
||||
pub fn mime_override<F>(mut self, f: F) -> Self
|
||||
where
|
||||
F: Fn(&mime::Name<'_>) -> DispositionType + 'static,
|
||||
{
|
||||
self.mime_override = Some(Rc::new(f));
|
||||
self
|
||||
}
|
||||
|
||||
/// Set index file
|
||||
///
|
||||
/// Shows specific index file for directory "/" instead of
|
||||
/// showing files listing.
|
||||
pub fn index_file<T: Into<String>>(mut self, index: T) -> Self {
|
||||
self.index = Some(index.into());
|
||||
self
|
||||
}
|
||||
|
||||
/// Specifies whether to use ETag or not.
|
||||
///
|
||||
/// Default is true.
|
||||
#[inline]
|
||||
pub fn use_etag(mut self, value: bool) -> Self {
|
||||
self.file_flags.set(named::Flags::ETAG, value);
|
||||
self
|
||||
}
|
||||
|
||||
/// Specifies whether to use Last-Modified or not.
|
||||
///
|
||||
/// Default is true.
|
||||
#[inline]
|
||||
pub fn use_last_modified(mut self, value: bool) -> Self {
|
||||
self.file_flags.set(named::Flags::LAST_MD, value);
|
||||
self
|
||||
}
|
||||
|
||||
/// Specifies whether text responses should signal a UTF-8 encoding.
|
||||
///
|
||||
/// Default is false (but will default to true in a future version).
|
||||
#[inline]
|
||||
pub fn prefer_utf8(mut self, value: bool) -> Self {
|
||||
self.file_flags.set(named::Flags::PREFER_UTF8, value);
|
||||
self
|
||||
}
|
||||
|
||||
/// Specifies custom guards to use for directory listings and files.
|
||||
///
|
||||
/// Default behaviour allows GET and HEAD.
|
||||
#[inline]
|
||||
pub fn use_guards<G: Guard + 'static>(mut self, guards: G) -> Self {
|
||||
self.guards = Some(Rc::new(guards));
|
||||
self
|
||||
}
|
||||
|
||||
/// Disable `Content-Disposition` header.
|
||||
///
|
||||
/// By default the `Content-Disposition` header is enabled.
|
||||
#[inline]
|
||||
pub fn disable_content_disposition(mut self) -> Self {
|
||||
self.file_flags.remove(named::Flags::CONTENT_DISPOSITION);
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets default handler which is used when no matched file could be found.
|
||||
pub fn default_handler<F, U>(mut self, f: F) -> Self
|
||||
where
|
||||
F: IntoServiceFactory<U>,
|
||||
U: ServiceFactory<
|
||||
Config = (),
|
||||
Request = ServiceRequest,
|
||||
Response = ServiceResponse,
|
||||
Error = Error,
|
||||
> + 'static,
|
||||
{
|
||||
// create and configure default resource
|
||||
self.default = Rc::new(RefCell::new(Some(Rc::new(boxed::factory(
|
||||
f.into_factory().map_init_err(|_| ()),
|
||||
)))));
|
||||
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl HttpServiceFactory for Files {
|
||||
fn register(self, config: &mut AppService) {
|
||||
if self.default.borrow().is_none() {
|
||||
*self.default.borrow_mut() = Some(config.default_service());
|
||||
}
|
||||
|
||||
let rdef = if config.is_root() {
|
||||
ResourceDef::root_prefix(&self.path)
|
||||
} else {
|
||||
ResourceDef::prefix(&self.path)
|
||||
};
|
||||
|
||||
config.register_service(rdef, None, self, None)
|
||||
}
|
||||
}
|
||||
|
||||
impl ServiceFactory for Files {
|
||||
type Request = ServiceRequest;
|
||||
type Response = ServiceResponse;
|
||||
type Error = Error;
|
||||
type Config = ();
|
||||
type Service = FilesService;
|
||||
type InitError = ();
|
||||
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
|
||||
|
||||
fn new_service(&self, _: ()) -> Self::Future {
|
||||
let mut srv = FilesService {
|
||||
directory: self.directory.clone(),
|
||||
index: self.index.clone(),
|
||||
show_index: self.show_index,
|
||||
redirect_to_slash: self.redirect_to_slash,
|
||||
default: None,
|
||||
renderer: self.renderer.clone(),
|
||||
mime_override: self.mime_override.clone(),
|
||||
file_flags: self.file_flags,
|
||||
guards: self.guards.clone(),
|
||||
};
|
||||
|
||||
if let Some(ref default) = *self.default.borrow() {
|
||||
default
|
||||
.new_service(())
|
||||
.map(move |result| match result {
|
||||
Ok(default) => {
|
||||
srv.default = Some(default);
|
||||
Ok(srv)
|
||||
}
|
||||
Err(_) => Err(()),
|
||||
})
|
||||
.boxed_local()
|
||||
} else {
|
||||
ok(srv).boxed_local()
|
||||
}
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
@@ -7,30 +7,36 @@ use std::time::{SystemTime, UNIX_EPOCH};
|
||||
#[cfg(unix)]
|
||||
use std::os::unix::fs::MetadataExt;
|
||||
|
||||
use bitflags::bitflags;
|
||||
use mime;
|
||||
use mime_guess::guess_mime_type;
|
||||
|
||||
use actix_web::http::header::{
|
||||
self, ContentDisposition, DispositionParam, DispositionType,
|
||||
use actix_web::{
|
||||
dev::{BodyEncoding, SizedStream},
|
||||
http::{
|
||||
header::{
|
||||
self, Charset, ContentDisposition, DispositionParam, DispositionType,
|
||||
ExtendedValue,
|
||||
},
|
||||
ContentEncoding, StatusCode,
|
||||
},
|
||||
Error, HttpMessage, HttpRequest, HttpResponse, Responder,
|
||||
};
|
||||
use actix_web::http::{ContentEncoding, Method, StatusCode};
|
||||
use actix_web::middleware::encoding::BodyEncoding;
|
||||
use actix_web::{Error, HttpMessage, HttpRequest, HttpResponse, Responder};
|
||||
use bitflags::bitflags;
|
||||
use futures_util::future::{ready, Ready};
|
||||
use mime_guess::from_path;
|
||||
|
||||
use crate::range::HttpRange;
|
||||
use crate::ChunkedReadFile;
|
||||
use crate::{encoding::equiv_utf8_text, range::HttpRange};
|
||||
|
||||
bitflags! {
|
||||
pub(crate) struct Flags: u32 {
|
||||
const ETAG = 0b00000001;
|
||||
const LAST_MD = 0b00000010;
|
||||
pub(crate) struct Flags: u8 {
|
||||
const ETAG = 0b0000_0001;
|
||||
const LAST_MD = 0b0000_0010;
|
||||
const CONTENT_DISPOSITION = 0b0000_0100;
|
||||
const PREFER_UTF8 = 0b0000_1000;
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Flags {
|
||||
fn default() -> Self {
|
||||
Flags::all()
|
||||
Flags::from_bits_truncate(0b0000_0111)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -39,13 +45,13 @@ impl Default for Flags {
|
||||
pub struct NamedFile {
|
||||
path: PathBuf,
|
||||
file: File,
|
||||
modified: Option<SystemTime>,
|
||||
pub(crate) md: Metadata,
|
||||
pub(crate) flags: Flags,
|
||||
pub(crate) status_code: StatusCode,
|
||||
pub(crate) content_type: mime::Mime,
|
||||
pub(crate) content_disposition: header::ContentDisposition,
|
||||
pub(crate) md: Metadata,
|
||||
modified: Option<SystemTime>,
|
||||
pub(crate) encoding: Option<ContentEncoding>,
|
||||
pub(crate) status_code: StatusCode,
|
||||
pub(crate) flags: Flags,
|
||||
}
|
||||
|
||||
impl NamedFile {
|
||||
@@ -66,6 +72,7 @@ impl NamedFile {
|
||||
/// let mut file = File::create("foo.txt")?;
|
||||
/// file.write_all(b"Hello, world!")?;
|
||||
/// let named_file = NamedFile::from_file(file, "bar.txt")?;
|
||||
/// # std::fs::remove_file("foo.txt");
|
||||
/// Ok(())
|
||||
/// }
|
||||
/// ```
|
||||
@@ -85,21 +92,36 @@ impl NamedFile {
|
||||
}
|
||||
};
|
||||
|
||||
let ct = guess_mime_type(&path);
|
||||
let disposition_type = match ct.type_() {
|
||||
let ct = from_path(&path).first_or_octet_stream();
|
||||
|
||||
let disposition = match ct.type_() {
|
||||
mime::IMAGE | mime::TEXT | mime::VIDEO => DispositionType::Inline,
|
||||
_ => DispositionType::Attachment,
|
||||
};
|
||||
|
||||
let mut parameters =
|
||||
vec![DispositionParam::Filename(String::from(filename.as_ref()))];
|
||||
|
||||
if !filename.is_ascii() {
|
||||
parameters.push(DispositionParam::FilenameExt(ExtendedValue {
|
||||
charset: Charset::Ext(String::from("UTF-8")),
|
||||
language_tag: None,
|
||||
value: filename.into_owned().into_bytes(),
|
||||
}))
|
||||
}
|
||||
|
||||
let cd = ContentDisposition {
|
||||
disposition: disposition_type,
|
||||
parameters: vec![DispositionParam::Filename(filename.into_owned())],
|
||||
disposition,
|
||||
parameters,
|
||||
};
|
||||
|
||||
(ct, cd)
|
||||
};
|
||||
|
||||
let md = file.metadata()?;
|
||||
let modified = md.modified().ok();
|
||||
let encoding = None;
|
||||
|
||||
Ok(NamedFile {
|
||||
path,
|
||||
file,
|
||||
@@ -170,11 +192,21 @@ impl NamedFile {
|
||||
/// sent to the peer. By default the disposition is `inline` for text,
|
||||
/// image, and video content types, and `attachment` otherwise, and
|
||||
/// the filename is taken from the path provided in the `open` method
|
||||
/// after converting it to UTF-8 using
|
||||
/// [to_string_lossy](https://doc.rust-lang.org/std/ffi/struct.OsStr.html#method.to_string_lossy).
|
||||
/// after converting it to UTF-8 using
/// [`std::ffi::OsStr::to_string_lossy`].
|
||||
#[inline]
|
||||
pub fn set_content_disposition(mut self, cd: header::ContentDisposition) -> Self {
|
||||
self.content_disposition = cd;
|
||||
self.flags.insert(Flags::CONTENT_DISPOSITION);
|
||||
self
|
||||
}
|
||||
|
||||
/// Disable `Content-Disposition` header.
|
||||
///
|
||||
/// By default the `Content-Disposition` header is enabled.
|
||||
#[inline]
|
||||
pub fn disable_content_disposition(mut self) -> Self {
|
||||
self.flags.remove(Flags::CONTENT_DISPOSITION);
|
||||
self
|
||||
}
|
||||
|
||||
@@ -185,24 +217,33 @@ impl NamedFile {
|
||||
self
|
||||
}
|
||||
|
||||
#[inline]
|
||||
///Specifies whether to use ETag or not.
|
||||
/// Specifies whether to use ETag or not.
|
||||
///
|
||||
///Default is true.
|
||||
/// Default is true.
|
||||
#[inline]
|
||||
pub fn use_etag(mut self, value: bool) -> Self {
|
||||
self.flags.set(Flags::ETAG, value);
|
||||
self
|
||||
}
|
||||
|
||||
#[inline]
|
||||
///Specifies whether to use Last-Modified or not.
|
||||
/// Specifies whether to use Last-Modified or not.
|
||||
///
|
||||
///Default is true.
|
||||
/// Default is true.
|
||||
#[inline]
|
||||
pub fn use_last_modified(mut self, value: bool) -> Self {
|
||||
self.flags.set(Flags::LAST_MD, value);
|
||||
self
|
||||
}
|
||||
|
||||
/// Specifies whether text responses should signal a UTF-8 encoding.
|
||||
///
|
||||
/// Default is false (but will default to true in a future version).
|
||||
#[inline]
|
||||
pub fn prefer_utf8(mut self, value: bool) -> Self {
|
||||
self.flags.set(Flags::PREFER_UTF8, value);
|
||||
self
|
||||
}
|
||||
|
||||
pub(crate) fn etag(&self) -> Option<header::EntityTag> {
|
||||
// This etag format is similar to Apache's.
|
||||
self.modified.as_ref().map(|mtime| {
|
||||
@@ -220,6 +261,7 @@ impl NamedFile {
|
||||
let dur = mtime
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("modification time must be after epoch");
|
||||
|
||||
header::EntityTag::strong(format!(
|
||||
"{:x}:{:x}:{:x}:{:x}",
|
||||
ino,
|
||||
@@ -233,6 +275,169 @@ impl NamedFile {
|
||||
pub(crate) fn last_modified(&self) -> Option<header::HttpDate> {
|
||||
self.modified.map(|mtime| mtime.into())
|
||||
}
|
||||
|
||||
/// Creates an `HttpResponse` with file as a streaming body.
|
||||
pub fn into_response(self, req: &HttpRequest) -> Result<HttpResponse, Error> {
|
||||
if self.status_code != StatusCode::OK {
|
||||
let mut res = HttpResponse::build(self.status_code);
|
||||
|
||||
if self.flags.contains(Flags::PREFER_UTF8) {
|
||||
let ct = equiv_utf8_text(self.content_type.clone());
|
||||
res.header(header::CONTENT_TYPE, ct.to_string());
|
||||
} else {
|
||||
res.header(header::CONTENT_TYPE, self.content_type.to_string());
|
||||
}
|
||||
|
||||
if self.flags.contains(Flags::CONTENT_DISPOSITION) {
|
||||
res.header(
|
||||
header::CONTENT_DISPOSITION,
|
||||
self.content_disposition.to_string(),
|
||||
);
|
||||
}
|
||||
|
||||
if let Some(current_encoding) = self.encoding {
|
||||
res.encoding(current_encoding);
|
||||
}
|
||||
|
||||
let reader = ChunkedReadFile {
|
||||
size: self.md.len(),
|
||||
offset: 0,
|
||||
file: Some(self.file),
|
||||
fut: None,
|
||||
counter: 0,
|
||||
};
|
||||
|
||||
return Ok(res.streaming(reader));
|
||||
}
|
||||
|
||||
let etag = if self.flags.contains(Flags::ETAG) {
|
||||
self.etag()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let last_modified = if self.flags.contains(Flags::LAST_MD) {
|
||||
self.last_modified()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// check preconditions
|
||||
let precondition_failed = if !any_match(etag.as_ref(), req) {
|
||||
true
|
||||
} else if let (Some(ref m), Some(header::IfUnmodifiedSince(ref since))) =
|
||||
(last_modified, req.get_header())
|
||||
{
|
||||
let t1: SystemTime = m.clone().into();
|
||||
let t2: SystemTime = since.clone().into();
|
||||
|
||||
match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) {
|
||||
(Ok(t1), Ok(t2)) => t1 > t2,
|
||||
_ => false,
|
||||
}
|
||||
} else {
|
||||
false
|
||||
};
|
||||
|
||||
// check last modified
|
||||
let not_modified = if !none_match(etag.as_ref(), req) {
|
||||
true
|
||||
} else if req.headers().contains_key(header::IF_NONE_MATCH) {
|
||||
false
|
||||
} else if let (Some(ref m), Some(header::IfModifiedSince(ref since))) =
|
||||
(last_modified, req.get_header())
|
||||
{
|
||||
let t1: SystemTime = m.clone().into();
|
||||
let t2: SystemTime = since.clone().into();
|
||||
|
||||
match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) {
|
||||
(Ok(t1), Ok(t2)) => t1 <= t2,
|
||||
_ => false,
|
||||
}
|
||||
} else {
|
||||
false
|
||||
};
|
||||
|
||||
let mut resp = HttpResponse::build(self.status_code);
|
||||
|
||||
if self.flags.contains(Flags::PREFER_UTF8) {
|
||||
let ct = equiv_utf8_text(self.content_type.clone());
|
||||
resp.header(header::CONTENT_TYPE, ct.to_string());
|
||||
} else {
|
||||
resp.header(header::CONTENT_TYPE, self.content_type.to_string());
|
||||
}
|
||||
|
||||
if self.flags.contains(Flags::CONTENT_DISPOSITION) {
|
||||
resp.header(
|
||||
header::CONTENT_DISPOSITION,
|
||||
self.content_disposition.to_string(),
|
||||
);
|
||||
}
|
||||
|
||||
// default compressing
|
||||
if let Some(current_encoding) = self.encoding {
|
||||
resp.encoding(current_encoding);
|
||||
}
|
||||
|
||||
if let Some(lm) = last_modified {
|
||||
resp.header(header::LAST_MODIFIED, lm.to_string());
|
||||
}
|
||||
|
||||
if let Some(etag) = etag {
|
||||
resp.header(header::ETAG, etag.to_string());
|
||||
}
|
||||
|
||||
resp.header(header::ACCEPT_RANGES, "bytes");
|
||||
|
||||
let mut length = self.md.len();
|
||||
let mut offset = 0;
|
||||
|
||||
// check for range header
|
||||
if let Some(ranges) = req.headers().get(header::RANGE) {
|
||||
if let Ok(ranges_header) = ranges.to_str() {
|
||||
if let Ok(ranges) = HttpRange::parse(ranges_header, length) {
|
||||
length = ranges[0].length;
|
||||
offset = ranges[0].start;
|
||||
|
||||
resp.encoding(ContentEncoding::Identity);
|
||||
resp.header(
|
||||
header::CONTENT_RANGE,
|
||||
format!(
|
||||
"bytes {}-{}/{}",
|
||||
offset,
|
||||
offset + length - 1,
|
||||
self.md.len()
|
||||
),
|
||||
);
|
||||
} else {
|
||||
resp.header(header::CONTENT_RANGE, format!("bytes */{}", length));
|
||||
return Ok(resp.status(StatusCode::RANGE_NOT_SATISFIABLE).finish());
|
||||
};
|
||||
} else {
|
||||
return Ok(resp.status(StatusCode::BAD_REQUEST).finish());
|
||||
};
|
||||
};
|
||||
|
||||
if precondition_failed {
|
||||
return Ok(resp.status(StatusCode::PRECONDITION_FAILED).finish());
|
||||
} else if not_modified {
|
||||
return Ok(resp.status(StatusCode::NOT_MODIFIED).finish());
|
||||
}
|
||||
|
||||
let reader = ChunkedReadFile {
|
||||
offset,
|
||||
size: length,
|
||||
file: Some(self.file),
|
||||
fut: None,
|
||||
counter: 0,
|
||||
};
|
||||
|
||||
if offset != 0 || length != self.md.len() {
|
||||
resp.status(StatusCode::PARTIAL_CONTENT);
|
||||
}
|
||||
|
||||
Ok(resp.body(SizedStream::new(length, reader)))
|
||||
}
|
||||
}
|
||||
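For orientation, here is a minimal usage sketch of the builder methods above, assuming the `actix-files` and `actix-web` crates from this workspace; the file path and handler name are illustrative:

```rust
use actix_files::NamedFile;
use actix_web::{Error, HttpRequest, HttpResponse};

// Open a file and tune the response headers with the builders above, then let
// `into_response` apply the ETag/Last-Modified precondition and Range handling.
async fn report(req: HttpRequest) -> Result<HttpResponse, Error> {
    let file = NamedFile::open("static/report.txt")?
        .use_etag(true)          // emit an ETag header (default: true)
        .use_last_modified(true) // emit a Last-Modified header (default: true)
        .prefer_utf8(true);      // advertise UTF-8 for text content types

    file.into_response(&req)
}
```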
|
||||
impl Deref for NamedFile {
|
||||
@@ -253,6 +458,7 @@ impl DerefMut for NamedFile {
|
||||
fn any_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
|
||||
match req.get_header::<header::IfMatch>() {
|
||||
None | Some(header::IfMatch::Any) => true,
|
||||
|
||||
Some(header::IfMatch::Items(ref items)) => {
|
||||
if let Some(some_etag) = etag {
|
||||
for item in items {
|
||||
@@ -261,6 +467,7 @@ fn any_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
}
|
||||
@@ -270,6 +477,7 @@ fn any_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
|
||||
fn none_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
|
||||
match req.get_header::<header::IfNoneMatch>() {
|
||||
Some(header::IfNoneMatch::Any) => false,
|
||||
|
||||
Some(header::IfNoneMatch::Items(ref items)) => {
|
||||
if let Some(some_etag) = etag {
|
||||
for item in items {
|
||||
@@ -278,152 +486,19 @@ fn none_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
true
|
||||
}
|
||||
|
||||
None => true,
|
||||
}
|
||||
}
|
||||
|
||||
impl Responder for NamedFile {
|
||||
type Error = Error;
|
||||
type Future = Result<HttpResponse, Error>;
|
||||
type Future = Ready<Result<HttpResponse, Error>>;
|
||||
|
||||
fn respond_to(self, req: &HttpRequest) -> Self::Future {
|
||||
if self.status_code != StatusCode::OK {
|
||||
let mut resp = HttpResponse::build(self.status_code);
|
||||
resp.set(header::ContentType(self.content_type.clone()))
|
||||
.header(
|
||||
header::CONTENT_DISPOSITION,
|
||||
self.content_disposition.to_string(),
|
||||
);
|
||||
if let Some(current_encoding) = self.encoding {
|
||||
resp.encoding(current_encoding);
|
||||
}
|
||||
let reader = ChunkedReadFile {
|
||||
size: self.md.len(),
|
||||
offset: 0,
|
||||
file: Some(self.file),
|
||||
fut: None,
|
||||
counter: 0,
|
||||
};
|
||||
return Ok(resp.streaming(reader));
|
||||
}
|
||||
|
||||
match req.method() {
|
||||
&Method::HEAD | &Method::GET => (),
|
||||
_ => {
|
||||
return Ok(HttpResponse::MethodNotAllowed()
|
||||
.header(header::CONTENT_TYPE, "text/plain")
|
||||
.header(header::ALLOW, "GET, HEAD")
|
||||
.body("This resource only supports GET and HEAD."));
|
||||
}
|
||||
}
|
||||
|
||||
let etag = if self.flags.contains(Flags::ETAG) {
|
||||
self.etag()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let last_modified = if self.flags.contains(Flags::LAST_MD) {
|
||||
self.last_modified()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// check preconditions
|
||||
let precondition_failed = if !any_match(etag.as_ref(), req) {
|
||||
true
|
||||
} else if let (Some(ref m), Some(header::IfUnmodifiedSince(ref since))) =
|
||||
(last_modified, req.get_header())
|
||||
{
|
||||
m > since
|
||||
} else {
|
||||
false
|
||||
};
|
||||
|
||||
// check last modified
|
||||
let not_modified = if !none_match(etag.as_ref(), req) {
|
||||
true
|
||||
} else if req.headers().contains_key(&header::IF_NONE_MATCH) {
|
||||
false
|
||||
} else if let (Some(ref m), Some(header::IfModifiedSince(ref since))) =
|
||||
(last_modified, req.get_header())
|
||||
{
|
||||
m <= since
|
||||
} else {
|
||||
false
|
||||
};
|
||||
|
||||
let mut resp = HttpResponse::build(self.status_code);
|
||||
resp.set(header::ContentType(self.content_type.clone()))
|
||||
.header(
|
||||
header::CONTENT_DISPOSITION,
|
||||
self.content_disposition.to_string(),
|
||||
);
|
||||
// default compressing
|
||||
if let Some(current_encoding) = self.encoding {
|
||||
resp.encoding(current_encoding);
|
||||
}
|
||||
|
||||
resp.if_some(last_modified, |lm, resp| {
|
||||
resp.set(header::LastModified(lm));
|
||||
})
|
||||
.if_some(etag, |etag, resp| {
|
||||
resp.set(header::ETag(etag));
|
||||
});
|
||||
|
||||
resp.header(header::ACCEPT_RANGES, "bytes");
|
||||
|
||||
let mut length = self.md.len();
|
||||
let mut offset = 0;
|
||||
|
||||
// check for range header
|
||||
if let Some(ranges) = req.headers().get(&header::RANGE) {
|
||||
if let Ok(rangesheader) = ranges.to_str() {
|
||||
if let Ok(rangesvec) = HttpRange::parse(rangesheader, length) {
|
||||
length = rangesvec[0].length;
|
||||
offset = rangesvec[0].start;
|
||||
resp.encoding(ContentEncoding::Identity);
|
||||
resp.header(
|
||||
header::CONTENT_RANGE,
|
||||
format!(
|
||||
"bytes {}-{}/{}",
|
||||
offset,
|
||||
offset + length - 1,
|
||||
self.md.len()
|
||||
),
|
||||
);
|
||||
} else {
|
||||
resp.header(header::CONTENT_RANGE, format!("bytes */{}", length));
|
||||
return Ok(resp.status(StatusCode::RANGE_NOT_SATISFIABLE).finish());
|
||||
};
|
||||
} else {
|
||||
return Ok(resp.status(StatusCode::BAD_REQUEST).finish());
|
||||
};
|
||||
};
|
||||
|
||||
resp.header(header::CONTENT_LENGTH, format!("{}", length));
|
||||
|
||||
if precondition_failed {
|
||||
return Ok(resp.status(StatusCode::PRECONDITION_FAILED).finish());
|
||||
} else if not_modified {
|
||||
return Ok(resp.status(StatusCode::NOT_MODIFIED).finish());
|
||||
}
|
||||
|
||||
if *req.method() == Method::HEAD {
|
||||
Ok(resp.finish())
|
||||
} else {
|
||||
let reader = ChunkedReadFile {
|
||||
offset,
|
||||
size: length,
|
||||
file: Some(self.file),
|
||||
fut: None,
|
||||
counter: 0,
|
||||
};
|
||||
if offset != 0 || length != self.md.len() {
|
||||
return Ok(resp.status(StatusCode::PARTIAL_CONTENT).streaming(reader));
|
||||
};
|
||||
Ok(resp.streaming(reader))
|
||||
}
|
||||
ready(self.into_response(req))
|
||||
}
|
||||
}
|
||||
|
||||
99
actix-files/src/path_buf.rs
Normal file
@@ -0,0 +1,99 @@
|
||||
use std::{
|
||||
path::{Path, PathBuf},
|
||||
str::FromStr,
|
||||
};
|
||||
|
||||
use actix_web::{dev::Payload, FromRequest, HttpRequest};
|
||||
use futures_util::future::{ready, Ready};
|
||||
|
||||
use crate::error::UriSegmentError;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct PathBufWrap(PathBuf);
|
||||
|
||||
impl FromStr for PathBufWrap {
|
||||
type Err = UriSegmentError;
|
||||
|
||||
fn from_str(path: &str) -> Result<Self, Self::Err> {
|
||||
let mut buf = PathBuf::new();
|
||||
|
||||
for segment in path.split('/') {
|
||||
if segment == ".." {
|
||||
buf.pop();
|
||||
} else if segment.starts_with('.') {
|
||||
return Err(UriSegmentError::BadStart('.'));
|
||||
} else if segment.starts_with('*') {
|
||||
return Err(UriSegmentError::BadStart('*'));
|
||||
} else if segment.ends_with(':') {
|
||||
return Err(UriSegmentError::BadEnd(':'));
|
||||
} else if segment.ends_with('>') {
|
||||
return Err(UriSegmentError::BadEnd('>'));
|
||||
} else if segment.ends_with('<') {
|
||||
return Err(UriSegmentError::BadEnd('<'));
|
||||
} else if segment.is_empty() {
|
||||
continue;
|
||||
} else if cfg!(windows) && segment.contains('\\') {
|
||||
return Err(UriSegmentError::BadChar('\\'));
|
||||
} else {
|
||||
buf.push(segment)
|
||||
}
|
||||
}
|
||||
|
||||
Ok(PathBufWrap(buf))
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<Path> for PathBufWrap {
|
||||
fn as_ref(&self) -> &Path {
|
||||
self.0.as_ref()
|
||||
}
|
||||
}
|
||||
|
||||
impl FromRequest for PathBufWrap {
|
||||
type Error = UriSegmentError;
|
||||
type Future = Ready<Result<Self, Self::Error>>;
|
||||
type Config = ();
|
||||
|
||||
fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future {
|
||||
ready(req.match_info().path().parse())
|
||||
}
|
||||
}
|
||||
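Since `PathBufWrap` implements `FromRequest`, code inside this crate can take it directly as an extractor. A hedged sketch (the handler name is illustrative; it relies on the crate's error module converting `UriSegmentError` into `actix_web::Error`):

```rust
// A crate-internal handler could receive the already-sanitised path like this.
async fn show_path(path: PathBufWrap) -> String {
    format!("sanitised path: {}", path.as_ref().display())
}
```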
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::iter::FromIterator;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_path_buf() {
|
||||
assert_eq!(
|
||||
PathBufWrap::from_str("/test/.tt").map(|t| t.0),
|
||||
Err(UriSegmentError::BadStart('.'))
|
||||
);
|
||||
assert_eq!(
|
||||
PathBufWrap::from_str("/test/*tt").map(|t| t.0),
|
||||
Err(UriSegmentError::BadStart('*'))
|
||||
);
|
||||
assert_eq!(
|
||||
PathBufWrap::from_str("/test/tt:").map(|t| t.0),
|
||||
Err(UriSegmentError::BadEnd(':'))
|
||||
);
|
||||
assert_eq!(
|
||||
PathBufWrap::from_str("/test/tt<").map(|t| t.0),
|
||||
Err(UriSegmentError::BadEnd('<'))
|
||||
);
|
||||
assert_eq!(
|
||||
PathBufWrap::from_str("/test/tt>").map(|t| t.0),
|
||||
Err(UriSegmentError::BadEnd('>'))
|
||||
);
|
||||
assert_eq!(
|
||||
PathBufWrap::from_str("/seg1/seg2/").unwrap().0,
|
||||
PathBuf::from_iter(vec!["seg1", "seg2"])
|
||||
);
|
||||
assert_eq!(
|
||||
PathBufWrap::from_str("/seg1/../seg2/").unwrap().0,
|
||||
PathBuf::from_iter(vec!["seg2"])
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1,11 +1,14 @@
|
||||
/// HTTP Range header representation.
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct HttpRange {
|
||||
/// Start of range.
|
||||
pub start: u64,
|
||||
|
||||
/// Length of range.
|
||||
pub length: u64,
|
||||
}
|
||||
|
||||
static PREFIX: &'static str = "bytes=";
|
||||
const PREFIX: &str = "bytes=";
|
||||
const PREFIX_LEN: usize = 6;
|
||||
|
||||
impl HttpRange {
|
||||
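A short sketch of how the parser above is consumed elsewhere in this crate; the header value and resource size are illustrative:

```rust
// `HttpRange::parse` yields every satisfiable range in the header; the file
// service only ever uses the first one, as in `NamedFile::into_response`.
fn first_range(header: &str, size: u64) -> Option<(u64, u64)> {
    HttpRange::parse(header, size)
        .ok()
        .and_then(|ranges| ranges.first().map(|r| (r.start, r.length)))
}

// first_range("bytes=0-499", 10_000) == Some((0, 500))
```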
|
||||
167
actix-files/src/service.rs
Normal file
@@ -0,0 +1,167 @@
|
||||
use std::{
|
||||
fmt, io,
|
||||
path::PathBuf,
|
||||
rc::Rc,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use actix_service::Service;
|
||||
use actix_web::{
|
||||
dev::{ServiceRequest, ServiceResponse},
|
||||
error::Error,
|
||||
guard::Guard,
|
||||
http::{header, Method},
|
||||
HttpResponse,
|
||||
};
|
||||
use futures_util::future::{ok, Either, LocalBoxFuture, Ready};
|
||||
|
||||
use crate::{
|
||||
named, Directory, DirectoryRenderer, FilesError, HttpService, MimeOverride,
|
||||
NamedFile, PathBufWrap,
|
||||
};
|
||||
|
||||
/// Assembled file serving service.
|
||||
pub struct FilesService {
|
||||
pub(crate) directory: PathBuf,
|
||||
pub(crate) index: Option<String>,
|
||||
pub(crate) show_index: bool,
|
||||
pub(crate) redirect_to_slash: bool,
|
||||
pub(crate) default: Option<HttpService>,
|
||||
pub(crate) renderer: Rc<DirectoryRenderer>,
|
||||
pub(crate) mime_override: Option<Rc<MimeOverride>>,
|
||||
pub(crate) file_flags: named::Flags,
|
||||
pub(crate) guards: Option<Rc<dyn Guard>>,
|
||||
}
|
||||
|
||||
type FilesServiceFuture = Either<
|
||||
Ready<Result<ServiceResponse, Error>>,
|
||||
LocalBoxFuture<'static, Result<ServiceResponse, Error>>,
|
||||
>;
|
||||
|
||||
impl FilesService {
|
||||
fn handle_err(&mut self, e: io::Error, req: ServiceRequest) -> FilesServiceFuture {
|
||||
log::debug!("Failed to handle {}: {}", req.path(), e);
|
||||
|
||||
if let Some(ref mut default) = self.default {
|
||||
Either::Right(default.call(req))
|
||||
} else {
|
||||
Either::Left(ok(req.error_response(e)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for FilesService {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.write_str("FilesService")
|
||||
}
|
||||
}
|
||||
|
||||
impl Service for FilesService {
|
||||
type Request = ServiceRequest;
|
||||
type Response = ServiceResponse;
|
||||
type Error = Error;
|
||||
type Future = FilesServiceFuture;
|
||||
|
||||
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, req: ServiceRequest) -> Self::Future {
|
||||
let is_method_valid = if let Some(guard) = &self.guards {
|
||||
// execute user defined guards
|
||||
(**guard).check(req.head())
|
||||
} else {
|
||||
// default behavior
|
||||
matches!(*req.method(), Method::HEAD | Method::GET)
|
||||
};
|
||||
|
||||
if !is_method_valid {
|
||||
return Either::Left(ok(req.into_response(
|
||||
actix_web::HttpResponse::MethodNotAllowed()
|
||||
.header(header::CONTENT_TYPE, "text/plain")
|
||||
.body("Request did not meet this resource's requirements."),
|
||||
)));
|
||||
}
|
||||
|
||||
let real_path: PathBufWrap = match req.match_info().path().parse() {
|
||||
Ok(item) => item,
|
||||
Err(e) => return Either::Left(ok(req.error_response(e))),
|
||||
};
|
||||
|
||||
// full file path
|
||||
let path = match self.directory.join(&real_path).canonicalize() {
|
||||
Ok(path) => path,
|
||||
Err(e) => return self.handle_err(e, req),
|
||||
};
|
||||
|
||||
if path.is_dir() {
|
||||
if let Some(ref redir_index) = self.index {
|
||||
if self.redirect_to_slash && !req.path().ends_with('/') {
|
||||
let redirect_to = format!("{}/", req.path());
|
||||
|
||||
return Either::Left(ok(req.into_response(
|
||||
HttpResponse::Found()
|
||||
.header(header::LOCATION, redirect_to)
|
||||
.body("")
|
||||
.into_body(),
|
||||
)));
|
||||
}
|
||||
|
||||
let path = path.join(redir_index);
|
||||
|
||||
match NamedFile::open(path) {
|
||||
Ok(mut named_file) => {
|
||||
if let Some(ref mime_override) = self.mime_override {
|
||||
let new_disposition =
|
||||
mime_override(&named_file.content_type.type_());
|
||||
named_file.content_disposition.disposition = new_disposition;
|
||||
}
|
||||
named_file.flags = self.file_flags;
|
||||
|
||||
let (req, _) = req.into_parts();
|
||||
Either::Left(ok(match named_file.into_response(&req) {
|
||||
Ok(item) => ServiceResponse::new(req, item),
|
||||
Err(e) => ServiceResponse::from_err(e, req),
|
||||
}))
|
||||
}
|
||||
Err(e) => self.handle_err(e, req),
|
||||
}
|
||||
} else if self.show_index {
|
||||
let dir = Directory::new(self.directory.clone(), path);
|
||||
|
||||
let (req, _) = req.into_parts();
|
||||
let x = (self.renderer)(&dir, &req);
|
||||
|
||||
match x {
|
||||
Ok(resp) => Either::Left(ok(resp)),
|
||||
Err(e) => Either::Left(ok(ServiceResponse::from_err(e, req))),
|
||||
}
|
||||
} else {
|
||||
Either::Left(ok(ServiceResponse::from_err(
|
||||
FilesError::IsDirectory,
|
||||
req.into_parts().0,
|
||||
)))
|
||||
}
|
||||
} else {
|
||||
match NamedFile::open(path) {
|
||||
Ok(mut named_file) => {
|
||||
if let Some(ref mime_override) = self.mime_override {
|
||||
let new_disposition =
|
||||
mime_override(&named_file.content_type.type_());
|
||||
named_file.content_disposition.disposition = new_disposition;
|
||||
}
|
||||
named_file.flags = self.file_flags;
|
||||
|
||||
let (req, _) = req.into_parts();
|
||||
match named_file.into_response(&req) {
|
||||
Ok(item) => {
|
||||
Either::Left(ok(ServiceResponse::new(req.clone(), item)))
|
||||
}
|
||||
Err(e) => Either::Left(ok(ServiceResponse::from_err(e, req))),
|
||||
}
|
||||
}
|
||||
Err(e) => self.handle_err(e, req),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
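Each `FilesService` field above is populated by a builder call on `Files`. A minimal wiring sketch, with illustrative mount path and directory (it assumes `actix-rt`, `actix-files`, and `actix-web` from this workspace):

```rust
use actix_files::Files;
use actix_web::{App, HttpServer};

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new().service(
            // index -> index_file, redirect_to_slash -> redirect_to_slash_directory,
            // show_index -> show_files_listing, file_flags -> prefer_utf8 et al.
            Files::new("/static", "./static")
                .index_file("index.html")
                .redirect_to_slash_directory()
                .show_files_listing()
                .prefer_utf8(true),
        )
    })
    .bind("127.0.0.1:8080")?
    .run()
    .await
}
```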
40
actix-files/tests/encoding.rs
Normal file
@@ -0,0 +1,40 @@
|
||||
use actix_files::Files;
|
||||
use actix_web::{
|
||||
http::{
|
||||
header::{self, HeaderValue},
|
||||
StatusCode,
|
||||
},
|
||||
test::{self, TestRequest},
|
||||
App,
|
||||
};
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_utf8_file_contents() {
|
||||
// use default ISO-8859-1 encoding
|
||||
let mut srv =
|
||||
test::init_service(App::new().service(Files::new("/", "./tests"))).await;
|
||||
|
||||
let req = TestRequest::with_uri("/utf8.txt").to_request();
|
||||
let res = test::call_service(&mut srv, req).await;
|
||||
|
||||
assert_eq!(res.status(), StatusCode::OK);
|
||||
assert_eq!(
|
||||
res.headers().get(header::CONTENT_TYPE),
|
||||
Some(&HeaderValue::from_static("text/plain")),
|
||||
);
|
||||
|
||||
// prefer UTF-8 encoding
|
||||
let mut srv = test::init_service(
|
||||
App::new().service(Files::new("/", "./tests").prefer_utf8(true)),
|
||||
)
|
||||
.await;
|
||||
|
||||
let req = TestRequest::with_uri("/utf8.txt").to_request();
|
||||
let res = test::call_service(&mut srv, req).await;
|
||||
|
||||
assert_eq!(res.status(), StatusCode::OK);
|
||||
assert_eq!(
|
||||
res.headers().get(header::CONTENT_TYPE),
|
||||
Some(&HeaderValue::from_static("text/plain; charset=utf-8")),
|
||||
);
|
||||
}
|
||||
3
actix-files/tests/utf8.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
中文内容显示正确。
|
||||
|
||||
English is OK.
|
||||
@@ -1,41 +0,0 @@
|
||||
environment:
|
||||
global:
|
||||
PROJECT_NAME: actix-http
|
||||
matrix:
|
||||
# Stable channel
|
||||
- TARGET: i686-pc-windows-msvc
|
||||
CHANNEL: stable
|
||||
- TARGET: x86_64-pc-windows-gnu
|
||||
CHANNEL: stable
|
||||
- TARGET: x86_64-pc-windows-msvc
|
||||
CHANNEL: stable
|
||||
# Nightly channel
|
||||
- TARGET: i686-pc-windows-msvc
|
||||
CHANNEL: nightly
|
||||
- TARGET: x86_64-pc-windows-gnu
|
||||
CHANNEL: nightly
|
||||
- TARGET: x86_64-pc-windows-msvc
|
||||
CHANNEL: nightly
|
||||
|
||||
# Install Rust and Cargo
|
||||
# (Based on from https://github.com/rust-lang/libc/blob/master/appveyor.yml)
|
||||
install:
|
||||
- ps: >-
|
||||
If ($Env:TARGET -eq 'x86_64-pc-windows-gnu') {
|
||||
$Env:PATH += ';C:\msys64\mingw64\bin'
|
||||
} ElseIf ($Env:TARGET -eq 'i686-pc-windows-gnu') {
|
||||
$Env:PATH += ';C:\MinGW\bin'
|
||||
}
|
||||
- curl -sSf -o rustup-init.exe https://win.rustup.rs
|
||||
- rustup-init.exe --default-host %TARGET% --default-toolchain %CHANNEL% -y
|
||||
- set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
|
||||
- rustc -Vv
|
||||
- cargo -V
|
||||
|
||||
# 'cargo test' takes care of building for us, so disable Appveyor's build stage.
|
||||
build: false
|
||||
|
||||
# Equivalent to Travis' `script` phase
|
||||
test_script:
|
||||
- cargo clean
|
||||
- cargo test
|
||||
@@ -1,9 +1,421 @@
|
||||
# Changes
|
||||
|
||||
## Unreleased - 2020-xx-xx
|
||||
|
||||
|
||||
## 2.2.2 - 2022-01-21
|
||||
### Changed
|
||||
- Migrate to `brotli` crate. [ad7e3c06]
|
||||
|
||||
[ad7e3c06]: https://github.com/actix/actix-web/commit/ad7e3c06
|
||||
|
||||
|
||||
## 2.2.1 - 2021-08-09
|
||||
### Fixed
|
||||
- Potential HTTP request smuggling vulnerabilities. [RUSTSEC-2021-0081](https://github.com/rustsec/advisory-db/pull/977)
|
||||
|
||||
|
||||
## 2.2.0 - 2020-11-25
|
||||
### Added
|
||||
* HttpResponse builders for 1xx status codes. [#1768]
|
||||
* `Accept::mime_precedence` and `Accept::mime_preference`. [#1793]
|
||||
* `TryFrom<u16>` and `TryFrom<f32>` for `http::header::Quality`. [#1797]
|
||||
|
||||
### Fixed
|
||||
* Started dropping `transfer-encoding: chunked` and `Content-Length` for 1XX and 204 responses. [#1767]
|
||||
|
||||
### Changed
|
||||
* Upgrade `serde_urlencoded` to `0.7`. [#1773]
|
||||
|
||||
[#1773]: https://github.com/actix/actix-web/pull/1773
|
||||
[#1767]: https://github.com/actix/actix-web/pull/1767
|
||||
[#1768]: https://github.com/actix/actix-web/pull/1768
|
||||
[#1793]: https://github.com/actix/actix-web/pull/1793
|
||||
[#1797]: https://github.com/actix/actix-web/pull/1797
|
||||
|
||||
|
||||
## 2.1.0 - 2020-10-30
|
||||
### Added
|
||||
* Added more flexible `on_connect_ext` methods for on-connect handling. [#1754]
|
||||
|
||||
### Changed
|
||||
* Upgrade `base64` to `0.13`. [#1744]
|
||||
* Upgrade `pin-project` to `1.0`. [#1733]
|
||||
* Deprecate `ResponseBuilder::{if_some, if_true}`. [#1760]
|
||||
|
||||
[#1760]: https://github.com/actix/actix-web/pull/1760
|
||||
[#1754]: https://github.com/actix/actix-web/pull/1754
|
||||
[#1733]: https://github.com/actix/actix-web/pull/1733
|
||||
[#1744]: https://github.com/actix/actix-web/pull/1744
|
||||
|
||||
|
||||
## 2.0.0 - 2020-09-11
|
||||
* No significant changes from `2.0.0-beta.4`.
|
||||
|
||||
|
||||
## 2.0.0-beta.4 - 2020-09-09
|
||||
### Changed
|
||||
* Update actix-codec and actix-utils dependencies.
|
||||
* Update actix-connect and actix-tls dependencies.
|
||||
|
||||
|
||||
## [2.0.0-beta.3] - 2020-08-14
|
||||
|
||||
### Fixed
|
||||
* Memory leak of `client::pool::ConnectorPoolSupport`. [#1626]
|
||||
|
||||
[#1626]: https://github.com/actix/actix-web/pull/1626
|
||||
|
||||
|
||||
## [2.0.0-beta.2] - 2020-07-21
|
||||
### Fixed
|
||||
* Potential UB in h1 decoder using uninitialized memory. [#1614]
|
||||
|
||||
### Changed
|
||||
* Fix illegal chunked encoding. [#1615]
|
||||
|
||||
[#1614]: https://github.com/actix/actix-web/pull/1614
|
||||
[#1615]: https://github.com/actix/actix-web/pull/1615
|
||||
|
||||
|
||||
## [2.0.0-beta.1] - 2020-07-11
|
||||
|
||||
### Changed
|
||||
|
||||
* Migrate cookie handling to `cookie` crate. [#1558]
|
||||
* Update `sha-1` to 0.9. [#1586]
|
||||
* Fix leak in client pool. [#1580]
|
||||
* MSRV is now 1.41.1.
|
||||
|
||||
[#1558]: https://github.com/actix/actix-web/pull/1558
|
||||
[#1586]: https://github.com/actix/actix-web/pull/1586
|
||||
[#1580]: https://github.com/actix/actix-web/pull/1580
|
||||
|
||||
## [2.0.0-alpha.4] - 2020-05-21
|
||||
|
||||
### Changed
|
||||
|
||||
* Bump minimum supported Rust version to 1.40
|
||||
* The content_length function is removed; set Content-Length by calling the no_chunking function instead [#1439]
|
||||
* `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a
|
||||
`u64` instead of a `usize`.
|
||||
* Update `base64` dependency to 0.12
|
||||
|
||||
### Fixed
|
||||
|
||||
* Support parsing of `SameSite=None` [#1503]
|
||||
|
||||
[#1439]: https://github.com/actix/actix-web/pull/1439
|
||||
[#1503]: https://github.com/actix/actix-web/pull/1503
|
||||
|
||||
## [2.0.0-alpha.3] - 2020-05-08
|
||||
|
||||
### Fixed
|
||||
|
||||
* Correct spelling of ConnectError::Unresolved [#1487]
|
||||
* Fix a mistake in the encoding of websocket continuation messages wherein
|
||||
Item::FirstText and Item::FirstBinary are each encoded as the other.
|
||||
|
||||
### Changed
|
||||
|
||||
* Implement `std::error::Error` for our custom errors [#1422]
|
||||
* Remove `failure` support for `ResponseError` since that crate
|
||||
will be deprecated in the near future.
|
||||
|
||||
[#1422]: https://github.com/actix/actix-web/pull/1422
|
||||
[#1487]: https://github.com/actix/actix-web/pull/1487
|
||||
|
||||
## [2.0.0-alpha.2] - 2020-03-07
|
||||
|
||||
### Changed
|
||||
|
||||
* Update `actix-connect` and `actix-tls` dependency to 2.0.0-alpha.1. [#1395]
|
||||
|
||||
* Change default initial window size and connection window size for HTTP2 to 2MB and 1MB respectively
|
||||
to improve download speed for awc when downloading large objects. [#1394]
|
||||
|
||||
* client::Connector accepts initial_window_size and initial_connection_window_size HTTP2 configuration. [#1394]
|
||||
|
||||
* client::Connector allows setting max_http_version to limit the HTTP version used. [#1394]
|
||||
|
||||
[#1394]: https://github.com/actix/actix-web/pull/1394
|
||||
[#1395]: https://github.com/actix/actix-web/pull/1395
|
||||
|
||||
## [2.0.0-alpha.1] - 2020-02-27
|
||||
|
||||
### Changed
|
||||
|
||||
* Update the `time` dependency to 0.2.7.
|
||||
* Moved actors messages support from actix crate, enabled with feature `actors`.
|
||||
* Breaking change: trait MessageBody requires Unpin and accepts Pin<&mut Self> instead of &mut self in poll_next().
|
||||
* MessageBody is not implemented for &'static [u8] anymore.
|
||||
|
||||
### Fixed
|
||||
|
||||
* Allow `SameSite=None` cookies to be sent in a response.
|
||||
|
||||
## [1.0.1] - 2019-12-20
|
||||
|
||||
### Fixed
|
||||
|
||||
* Poll upgrade service's readiness from HTTP service handlers
|
||||
|
||||
* Replace brotli with brotli2 #1224
|
||||
|
||||
## [1.0.0] - 2019-12-13
|
||||
|
||||
### Added
|
||||
|
||||
* Add websockets continuation frame support
|
||||
|
||||
### Changed
|
||||
|
||||
* Replace `flate2-xxx` features with `compress`
|
||||
|
||||
## [1.0.0-alpha.5] - 2019-12-09
|
||||
|
||||
### Fixed
|
||||
|
||||
* Check `Upgrade` service readiness before calling it
|
||||
|
||||
* Fix buffer remaining capacity calculation
|
||||
|
||||
### Changed
|
||||
|
||||
* Websockets: Ping and Pong should have binary data #1049
|
||||
|
||||
## [1.0.0-alpha.4] - 2019-12-08
|
||||
|
||||
### Added
|
||||
|
||||
* Add impl ResponseBuilder for Error
|
||||
|
||||
### Changed
|
||||
|
||||
* Use rust based brotli compression library
|
||||
|
||||
## [1.0.0-alpha.3] - 2019-12-07
|
||||
|
||||
### Changed
|
||||
|
||||
* Migrate to tokio 0.2
|
||||
|
||||
* Migrate to `std::future`
|
||||
|
||||
|
||||
## [0.2.11] - 2019-11-06
|
||||
|
||||
### Added
|
||||
|
||||
* Add support for serde_json::Value to be passed as argument to ResponseBuilder.body()
|
||||
|
||||
* Add an additional `filename*` param in the `Content-Disposition` header of `actix_files::NamedFile` to be more compatible. (#1151)
|
||||
|
||||
* Allow to use `std::convert::Infallible` as `actix_http::error::Error`
|
||||
|
||||
### Fixed
|
||||
|
||||
* To be compatible with non-English error responses, `ResponseError` is rendered with a `text/plain; charset=utf-8` header #1118
|
||||
|
||||
|
||||
## [0.2.10] - 2019-09-11
|
||||
|
||||
### Added
|
||||
|
||||
* Add support for sending HTTP requests with `Rc<RequestHead>` in addition to sending HTTP requests with `RequestHead`
|
||||
|
||||
### Fixed
|
||||
|
||||
* h2 will use error response #1080
|
||||
|
||||
* on_connect result isn't added to request extensions for http2 requests #1009
|
||||
|
||||
|
||||
## [0.2.9] - 2019-08-13
|
||||
|
||||
### Changed
|
||||
|
||||
* Dropped the `byteorder` dependency in favor of a `stdlib` implementation
|
||||
|
||||
* Update percent-encoding to 2.1
|
||||
|
||||
* Update serde_urlencoded to 0.6.1
|
||||
|
||||
### Fixed
|
||||
|
||||
* Fixed a panic in the HTTP2 handshake in client HTTP requests (#1031)
|
||||
|
||||
|
||||
## [0.2.8] - 2019-08-01
|
||||
|
||||
### Added
|
||||
|
||||
* Add `rustls` support
|
||||
|
||||
* Add `Clone` impl for `HeaderMap`
|
||||
|
||||
### Fixed
|
||||
|
||||
* awc client panic #1016
|
||||
|
||||
* Invalid response with compression middleware enabled, but compression-related features disabled #997
|
||||
|
||||
|
||||
## [0.2.7] - 2019-07-18
|
||||
|
||||
### Added
|
||||
|
||||
* Add support for downcasting response errors #986
|
||||
|
||||
|
||||
## [0.2.6] - 2019-07-17
|
||||
|
||||
### Changed
|
||||
|
||||
* Replace `ClonableService` with local copy
|
||||
|
||||
* Upgrade `rand` dependency version to 0.7
|
||||
|
||||
|
||||
## [0.2.5] - 2019-06-28
|
||||
|
||||
### Added
|
||||
|
||||
* Add `on-connect` callback, `HttpServiceBuilder::on_connect()` #946
|
||||
|
||||
### Changed
|
||||
|
||||
* Use `encoding_rs` crate instead of unmaintained `encoding` crate
|
||||
|
||||
* Add `Copy` and `Clone` impls for `ws::Codec`
|
||||
|
||||
|
||||
## [0.2.4] - 2019-06-16
|
||||
|
||||
### Fixed
|
||||
|
||||
* Do not compress NoContent (204) responses #918
|
||||
|
||||
|
||||
## [0.2.3] - 2019-06-02
|
||||
|
||||
### Added
|
||||
|
||||
* Debug impl for ResponseBuilder
|
||||
|
||||
* From SizedStream and BodyStream for Body
|
||||
|
||||
### Changed
|
||||
|
||||
* SizedStream uses u64
|
||||
|
||||
|
||||
## [0.2.2] - 2019-05-29
|
||||
|
||||
### Fixed
|
||||
|
||||
* Parse incoming stream before closing stream on disconnect #868
|
||||
|
||||
|
||||
## [0.2.1] - 2019-05-25
|
||||
|
||||
### Fixed
|
||||
|
||||
* Handle socket read disconnect
|
||||
|
||||
|
||||
## [0.2.0] - 2019-05-12
|
||||
|
||||
### Changed
|
||||
|
||||
* Update actix-service to 0.4
|
||||
|
||||
* Expect and upgrade services accept `ServerConfig` config.
|
||||
|
||||
### Deleted
|
||||
|
||||
* `OneRequest` service
|
||||
|
||||
|
||||
## [0.1.5] - 2019-05-04
|
||||
|
||||
### Fixed
|
||||
|
||||
* Clean up response extensions in response pool #817
|
||||
|
||||
|
||||
## [0.1.4] - 2019-04-24
|
||||
|
||||
### Added
|
||||
|
||||
* Allow to render h1 request headers in `Camel-Case`
|
||||
|
||||
### Fixed
|
||||
|
||||
* Read until eof for http/1.0 responses #771
|
||||
|
||||
|
||||
## [0.1.3] - 2019-04-23
|
||||
|
||||
### Fixed
|
||||
|
||||
* Fix http client pool management
|
||||
|
||||
* Fix http client wait queue management #794
|
||||
|
||||
|
||||
## [0.1.2] - 2019-04-23
|
||||
|
||||
### Fixed
|
||||
|
||||
* Fix BorrowMutError panic in client connector #793
|
||||
|
||||
|
||||
## [0.1.1] - 2019-04-19
|
||||
|
||||
### Changed
|
||||
|
||||
* Cookie::max_age() accepts value in seconds
|
||||
|
||||
* Cookie::max_age_time() accepts value in time::Duration
|
||||
|
||||
* Allow to specify server address for client connector
|
||||
|
||||
|
||||
## [0.1.0] - 2019-04-16
|
||||
|
||||
### Added
|
||||
|
||||
* Expose peer addr via `Request::peer_addr()` and `RequestHead::peer_addr`
|
||||
|
||||
### Changed
|
||||
|
||||
* `actix_http::encoding` always available
|
||||
|
||||
* use trust-dns-resolver 0.11.0
|
||||
|
||||
|
||||
## [0.1.0-alpha.5] - 2019-04-12
|
||||
|
||||
### Added
|
||||
|
||||
* Allow to use custom service for upgrade requests
|
||||
|
||||
* Added `h1::SendResponse` future.
|
||||
|
||||
### Changed
|
||||
|
||||
* MessageBody::length() renamed to MessageBody::size() for consistency
|
||||
|
||||
* ws handshake verification functions take RequestHead instead of Request
|
||||
|
||||
|
||||
## [0.1.0-alpha.4] - 2019-04-08
|
||||
|
||||
### Added
|
||||
|
||||
* Allow to use custom `Expect` handler
|
||||
|
||||
* Add minimal `std::error::Error` impl for `Error`
|
||||
|
||||
### Changed
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at fafhrd91@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
|
||||
|
||||
[homepage]: http://contributor-covenant.org
|
||||
[version]: http://contributor-covenant.org/version/1/4/
|
||||
@@ -1,26 +1,21 @@
|
||||
[package]
|
||||
name = "actix-http"
|
||||
version = "0.1.0-alpha.4"
|
||||
version = "2.2.2"
|
||||
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
|
||||
description = "Actix http primitives"
|
||||
description = "HTTP primitives for the Actix ecosystem"
|
||||
readme = "README.md"
|
||||
keywords = ["actix", "http", "framework", "async", "futures"]
|
||||
homepage = "https://actix.rs"
|
||||
repository = "https://github.com/actix/actix-http.git"
|
||||
repository = "https://github.com/actix/actix-web.git"
|
||||
documentation = "https://docs.rs/actix-http/"
|
||||
categories = ["network-programming", "asynchronous",
|
||||
"web-programming::http-server",
|
||||
"web-programming::websocket"]
|
||||
license = "MIT/Apache-2.0"
|
||||
license = "MIT OR Apache-2.0"
|
||||
edition = "2018"
|
||||
workspace = ".."
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
features = ["ssl", "fail", "brotli", "flate2-zlib", "secure-cookies"]
|
||||
|
||||
[badges]
|
||||
travis-ci = { repository = "actix/actix-web", branch = "master" }
|
||||
codecov = { repository = "actix/actix-web", branch = "master", service = "github" }
|
||||
features = ["openssl", "rustls", "compress", "secure-cookies", "actors"]
|
||||
|
||||
[lib]
|
||||
name = "actix_http"
|
||||
@@ -30,80 +25,85 @@ path = "src/lib.rs"
|
||||
default = []
|
||||
|
||||
# openssl
|
||||
ssl = ["openssl", "actix-connect/ssl"]
|
||||
openssl = ["actix-tls/openssl", "actix-connect/openssl"]
|
||||
|
||||
# brotli encoding, requires c compiler
|
||||
brotli = ["brotli2"]
|
||||
# rustls support
|
||||
rustls = ["actix-tls/rustls", "actix-connect/rustls"]
|
||||
|
||||
# miniz-sys backend for flate2 crate
|
||||
flate2-zlib = ["flate2/miniz-sys"]
|
||||
|
||||
# rust backend for flate2 crate
|
||||
flate2-rust = ["flate2/rust_backend"]
|
||||
|
||||
# failure integration. actix does not use failure anymore
|
||||
fail = ["failure"]
|
||||
# enable compression support
|
||||
compress = ["flate2", "brotli"]
|
||||
|
||||
# support for secure cookies
|
||||
secure-cookies = ["ring"]
|
||||
secure-cookies = ["cookie/secure"]
|
||||
|
||||
# support for actix Actor messages
|
||||
actors = ["actix"]
|
||||
|
||||
[dependencies]
|
||||
actix-service = "0.3.6"
|
||||
actix-codec = "0.1.2"
|
||||
actix-connect = "0.1.2"
|
||||
actix-utils = "0.3.5"
|
||||
actix-server-config = "0.1.0"
|
||||
actix-threadpool = "0.1.0"
|
||||
actix-service = "1.0.6"
|
||||
actix-codec = "0.3.0"
|
||||
actix-connect = "2.0.0"
|
||||
actix-utils = "2.0.0"
|
||||
actix-rt = "1.0.0"
|
||||
actix-threadpool = "0.3.1"
|
||||
actix-tls = { version = "2.0.0", optional = true }
|
||||
actix = { version = "0.10.0", optional = true }
|
||||
|
||||
base64 = "0.10"
|
||||
bitflags = "1.0"
|
||||
bytes = "0.4"
|
||||
byteorder = "1.2"
|
||||
copyless = "0.1.2"
|
||||
derive_more = "0.14"
|
||||
either = "1.5.2"
|
||||
encoding = "0.2"
|
||||
futures = "0.1"
|
||||
hashbrown = "0.1.8"
|
||||
h2 = "0.1.16"
|
||||
http = "0.1.16"
|
||||
base64 = "0.13"
|
||||
bitflags = "1.2"
|
||||
bytes = "0.5.3"
|
||||
cookie = { version = "0.14.1", features = ["percent-encode"] }
|
||||
copyless = "0.1.4"
|
||||
derive_more = "0.99.2"
|
||||
either = "1.5.3"
|
||||
encoding_rs = "0.8"
|
||||
futures-channel = { version = "0.3.5", default-features = false }
|
||||
futures-core = { version = "0.3.5", default-features = false }
|
||||
futures-util = { version = "0.3.5", default-features = false }
|
||||
fxhash = "0.2.1"
|
||||
h2 = "0.2.1"
|
||||
http = "0.2.0"
|
||||
httparse = "1.3"
|
||||
indexmap = "1.0"
|
||||
lazy_static = "1.0"
|
||||
indexmap = "1.3"
|
||||
itoa = "0.4"
|
||||
lazy_static = "1.4"
|
||||
language-tags = "0.2"
|
||||
log = "0.4"
|
||||
mime = "0.3"
|
||||
percent-encoding = "1.0"
|
||||
rand = "0.6"
|
||||
regex = "1.0"
|
||||
percent-encoding = "2.1"
|
||||
pin-project = "1.0.0"
|
||||
rand = "0.7"
|
||||
regex = "1.3"
|
||||
serde = "1.0"
|
||||
serde_json = "1.0"
|
||||
sha1 = "0.6"
|
||||
sha-1 = "0.9"
|
||||
slab = "0.4"
|
||||
serde_urlencoded = "0.5.3"
|
||||
time = "0.1"
|
||||
tokio-tcp = "0.1.3"
|
||||
tokio-timer = "0.2"
|
||||
tokio-current-thread = "0.1"
|
||||
trust-dns-resolver = { version="0.11.0-alpha.2", default-features = false }
|
||||
|
||||
# for secure cookie
|
||||
ring = { version = "0.14.6", optional = true }
|
||||
serde_urlencoded = "0.7"
|
||||
time = { version = "0.2.7", default-features = false, features = ["std"] }
|
||||
|
||||
# compression
|
||||
brotli2 = { version="^0.3.2", optional = true }
|
||||
flate2 = { version="^1.0.2", optional = true, default-features = false }
|
||||
|
||||
# optional deps
|
||||
failure = { version = "0.1.5", optional = true }
|
||||
openssl = { version="0.10", optional = true }
|
||||
brotli = { version = "3.3.3", optional = true }
|
||||
flate2 = { version = "1.0.13", optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
actix-rt = "0.2.2"
|
||||
actix-server = { version = "0.4.1", features=["ssl"] }
|
||||
actix-connect = { version = "0.1.0", features=["ssl"] }
|
||||
actix-http-test = { version = "0.1.0-alpha.3", features=["ssl"] }
|
||||
env_logger = "0.6"
|
||||
actix-server = "1.0.1"
|
||||
actix-connect = { version = "2.0.0", features = ["openssl"] }
|
||||
actix-http-test = { version = "2.0.0", features = ["openssl"] }
|
||||
actix-tls = { version = "2.0.0", features = ["openssl"] }
|
||||
criterion = "0.3"
|
||||
env_logger = "0.7"
|
||||
serde_derive = "1.0"
|
||||
openssl = { version="0.10" }
|
||||
tokio-tcp = "0.1"
|
||||
open-ssl = { version="0.10", package = "openssl" }
|
||||
rust-tls = { version="0.18", package = "rustls" }
|
||||
|
||||
[[bench]]
|
||||
name = "content-length"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "status-line"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "uninit-headers"
|
||||
harness = false
|
||||
|
||||
@@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2017-NOW Nikolay Kim
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
1
actix-http/LICENSE-APACHE
Symbolic link
@@ -0,0 +1 @@
|
||||
../LICENSE-APACHE
|
||||
@@ -1,25 +0,0 @@
|
||||
Copyright (c) 2017 Nikolay Kim
|
||||
|
||||
Permission is hereby granted, free of charge, to any
|
||||
person obtaining a copy of this software and associated
|
||||
documentation files (the "Software"), to deal in the
|
||||
Software without restriction, including without
|
||||
limitation the rights to use, copy, modify, merge,
|
||||
publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software
|
||||
is furnished to do so, subject to the following
|
||||
conditions:
|
||||
|
||||
The above copyright notice and this permission notice
|
||||
shall be included in all copies or substantial portions
|
||||
of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
DEALINGS IN THE SOFTWARE.
|
||||
1
actix-http/LICENSE-MIT
Symbolic link
@@ -0,0 +1 @@
|
||||
../LICENSE-MIT
|
||||
@@ -1,32 +1,50 @@
# Actix http [![Build Status](https://travis-ci.org/actix/actix-http.svg?branch=master)](https://travis-ci.org/actix/actix-http) [![codecov](https://codecov.io/gh/actix/actix-http/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-http) [![crates.io](https://meritbadge.herokuapp.com/actix-http)](https://crates.io/crates/actix-web) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
# actix-http

Actix http
> HTTP primitives for the Actix ecosystem.

## Documentation & community resources
[![crates.io](https://meritbadge.herokuapp.com/actix-http)](https://crates.io/crates/actix-http)
[![Documentation](https://docs.rs/actix-http/badge.svg?version=2.2.2)](https://docs.rs/actix-http/2.2.2)
![Version](https://img.shields.io/badge/rustc-1.42+-ab6000.svg)
[![Dependency Status](https://deps.rs/crate/actix-http/2.2.2/status.svg)](https://deps.rs/crate/actix-http/2.2.2)
[![Join the chat at https://gitter.im/actix/actix-web](https://badges.gitter.im/actix/actix-web.svg)](https://gitter.im/actix/actix-web?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)

* [User Guide](https://actix.rs/docs/)
* [API Documentation](https://docs.rs/actix-http/)
* [Chat on gitter](https://gitter.im/actix/actix)
* Cargo package: [actix-http](https://crates.io/crates/actix-http)
* Minimum supported Rust version: 1.26 or later
## Documentation & Resources

- [API Documentation](https://docs.rs/actix-http)
- [Chat on Gitter](https://gitter.im/actix/actix-web)
- Minimum Supported Rust Version (MSRV): 1.42.0

## Example

```rust
// see examples/framed_hello.rs for complete list of used crates.
extern crate actix_http;
use actix_http::{h1, Response, ServiceConfig};
use std::{env, io};

fn main() {
    Server::new().bind("framed_hello", "127.0.0.1:8080", || {
        IntoFramed::new(|| h1::Codec::new(ServiceConfig::default())) // <- create h1 codec
            .and_then(TakeItem::new().map_err(|_| ())) // <- read one request
            .and_then(|(_req, _framed): (_, Framed<_, _>)| { // <- send response and close conn
                SendResponse::send(_framed, Response::Ok().body("Hello world!"))
                    .map_err(|_| ())
                    .map(|_| ())
            })
    }).unwrap().run();
use actix_http::{HttpService, Response};
use actix_server::Server;
use futures_util::future;
use http::header::HeaderValue;
use log::info;

#[actix_rt::main]
async fn main() -> io::Result<()> {
    env::set_var("RUST_LOG", "hello_world=info");
    env_logger::init();

    Server::build()
        .bind("hello-world", "127.0.0.1:8080", || {
            HttpService::build()
                .client_timeout(1000)
                .client_disconnect(1000)
                .finish(|_req| {
                    info!("{:?}", _req);
                    let mut res = Response::Ok();
                    res.header("x-head", HeaderValue::from_static("dummy value!"));
                    future::ok::<_, ()>(res.body("Hello world!"))
                })
                .tcp()
        })?
        .run()
        .await
}
```
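Not part of the diff above: for quick manual testing, a minimal client that exercises the hello-world example with only the Rust standard library might look like the sketch below (the address simply mirrors the one bound in the example).

```rust
use std::io::{Read, Write};
use std::net::TcpStream;

// Connect to the example server started above and print whatever it returns.
fn main() -> std::io::Result<()> {
    let mut stream = TcpStream::connect("127.0.0.1:8080")?;
    stream.write_all(b"GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n")?;

    let mut response = String::new();
    stream.read_to_string(&mut response)?;
    println!("{}", response);
    Ok(())
}
```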
291
actix-http/benches/content-length.rs
Normal file
@@ -0,0 +1,291 @@
|
||||
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
|
||||
|
||||
use bytes::BytesMut;
|
||||
|
||||
// benchmark sending all requests at the same time
|
||||
fn bench_write_content_length(c: &mut Criterion) {
|
||||
let mut group = c.benchmark_group("write_content_length");
|
||||
|
||||
let sizes = [
|
||||
0, 1, 11, 83, 101, 653, 1001, 6323, 10001, 56329, 100001, 123456, 98724245,
|
||||
4294967202,
|
||||
];
|
||||
|
||||
for i in sizes.iter() {
|
||||
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
|
||||
b.iter(|| {
|
||||
let mut b = BytesMut::with_capacity(35);
|
||||
_original::write_content_length(i, &mut b)
|
||||
})
|
||||
});
|
||||
|
||||
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
|
||||
b.iter(|| {
|
||||
let mut b = BytesMut::with_capacity(35);
|
||||
_new::write_content_length(i, &mut b)
|
||||
})
|
||||
});
|
||||
|
||||
group.bench_with_input(BenchmarkId::new("itoa", i), i, |b, &i| {
|
||||
b.iter(|| {
|
||||
let mut b = BytesMut::with_capacity(35);
|
||||
_itoa::write_content_length(i, &mut b)
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
criterion_group!(benches, bench_write_content_length);
|
||||
criterion_main!(benches);
|
||||
|
||||
mod _itoa {
|
||||
use bytes::{BufMut, BytesMut};
|
||||
|
||||
pub fn write_content_length(n: usize, bytes: &mut BytesMut) {
|
||||
if n == 0 {
|
||||
bytes.put_slice(b"\r\ncontent-length: 0\r\n");
|
||||
return;
|
||||
}
|
||||
|
||||
let mut buf = itoa::Buffer::new();
|
||||
|
||||
bytes.put_slice(b"\r\ncontent-length: ");
|
||||
bytes.put_slice(buf.format(n).as_bytes());
|
||||
bytes.put_slice(b"\r\n");
|
||||
}
|
||||
}
|
||||
|
||||
mod _new {
|
||||
use bytes::{BufMut, BytesMut};
|
||||
|
||||
const DIGITS_START: u8 = b'0';
|
||||
|
||||
/// NOTE: bytes object has to contain enough space
|
||||
pub fn write_content_length(n: usize, bytes: &mut BytesMut) {
|
||||
if n == 0 {
|
||||
bytes.put_slice(b"\r\ncontent-length: 0\r\n");
|
||||
return;
|
||||
}
|
||||
|
||||
bytes.put_slice(b"\r\ncontent-length: ");
|
||||
|
||||
if n < 10 {
|
||||
bytes.put_u8(DIGITS_START + (n as u8));
|
||||
} else if n < 100 {
|
||||
let n = n as u8;
|
||||
|
||||
let d10 = n / 10;
|
||||
let d1 = n % 10;
|
||||
|
||||
bytes.put_u8(DIGITS_START + d10);
|
||||
bytes.put_u8(DIGITS_START + d1);
|
||||
} else if n < 1000 {
|
||||
let n = n as u16;
|
||||
|
||||
let d100 = (n / 100) as u8;
|
||||
let d10 = ((n / 10) % 10) as u8;
|
||||
let d1 = (n % 10) as u8;
|
||||
|
||||
bytes.put_u8(DIGITS_START + d100);
|
||||
bytes.put_u8(DIGITS_START + d10);
|
||||
bytes.put_u8(DIGITS_START + d1);
|
||||
} else if n < 10_000 {
|
||||
let n = n as u16;
|
||||
|
||||
let d1000 = (n / 1000) as u8;
|
||||
let d100 = ((n / 100) % 10) as u8;
|
||||
let d10 = ((n / 10) % 10) as u8;
|
||||
let d1 = (n % 10) as u8;
|
||||
|
||||
bytes.put_u8(DIGITS_START + d1000);
|
||||
bytes.put_u8(DIGITS_START + d100);
|
||||
bytes.put_u8(DIGITS_START + d10);
|
||||
bytes.put_u8(DIGITS_START + d1);
|
||||
} else if n < 100_000 {
|
||||
let n = n as u32;
|
||||
|
||||
let d10000 = (n / 10000) as u8;
|
||||
let d1000 = ((n / 1000) % 10) as u8;
|
||||
let d100 = ((n / 100) % 10) as u8;
|
||||
let d10 = ((n / 10) % 10) as u8;
|
||||
let d1 = (n % 10) as u8;
|
||||
|
||||
bytes.put_u8(DIGITS_START + d10000);
|
||||
bytes.put_u8(DIGITS_START + d1000);
|
||||
bytes.put_u8(DIGITS_START + d100);
|
||||
bytes.put_u8(DIGITS_START + d10);
|
||||
bytes.put_u8(DIGITS_START + d1);
|
||||
} else if n < 1_000_000 {
|
||||
let n = n as u32;
|
||||
|
||||
let d100000 = (n / 100000) as u8;
|
||||
let d10000 = ((n / 10000) % 10) as u8;
|
||||
let d1000 = ((n / 1000) % 10) as u8;
|
||||
let d100 = ((n / 100) % 10) as u8;
|
||||
let d10 = ((n / 10) % 10) as u8;
|
||||
let d1 = (n % 10) as u8;
|
||||
|
||||
bytes.put_u8(DIGITS_START + d100000);
|
||||
bytes.put_u8(DIGITS_START + d10000);
|
||||
bytes.put_u8(DIGITS_START + d1000);
|
||||
bytes.put_u8(DIGITS_START + d100);
|
||||
bytes.put_u8(DIGITS_START + d10);
|
||||
bytes.put_u8(DIGITS_START + d1);
|
||||
} else {
|
||||
write_usize(n, bytes);
|
||||
}
|
||||
|
||||
bytes.put_slice(b"\r\n");
|
||||
}
|
||||
|
||||
fn write_usize(n: usize, bytes: &mut BytesMut) {
|
||||
let mut n = n;
|
||||
|
||||
// 20 chars is max length of a usize (2^64)
|
||||
// digits will be added to the buffer from lsd to msd
|
||||
let mut buf = BytesMut::with_capacity(20);
|
||||
|
||||
while n > 9 {
|
||||
// "pop" the least-significant digit
|
||||
let lsd = (n % 10) as u8;
|
||||
|
||||
// remove the lsd from n
|
||||
n = n / 10;
|
||||
|
||||
buf.put_u8(DIGITS_START + lsd);
|
||||
}
|
||||
|
||||
// put msd to result buffer
|
||||
bytes.put_u8(DIGITS_START + (n as u8));
|
||||
|
||||
// put, in reverse (msd to lsd), remaining digits to buffer
|
||||
for i in (0..buf.len()).rev() {
|
||||
bytes.put_u8(buf[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mod _original {
|
||||
use std::{mem, ptr, slice};
|
||||
|
||||
use bytes::{BufMut, BytesMut};
|
||||
|
||||
const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\
|
||||
2021222324252627282930313233343536373839\
|
||||
4041424344454647484950515253545556575859\
|
||||
6061626364656667686970717273747576777879\
|
||||
8081828384858687888990919293949596979899";
|
||||
|
||||
/// NOTE: bytes object has to contain enough space
|
||||
pub fn write_content_length(mut n: usize, bytes: &mut BytesMut) {
|
||||
if n < 10 {
|
||||
let mut buf: [u8; 21] = [
|
||||
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
|
||||
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'\r', b'\n',
|
||||
];
|
||||
buf[18] = (n as u8) + b'0';
|
||||
bytes.put_slice(&buf);
|
||||
} else if n < 100 {
|
||||
let mut buf: [u8; 22] = [
|
||||
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
|
||||
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'\r', b'\n',
|
||||
];
|
||||
let d1 = n << 1;
|
||||
unsafe {
|
||||
ptr::copy_nonoverlapping(
|
||||
DEC_DIGITS_LUT.as_ptr().add(d1),
|
||||
buf.as_mut_ptr().offset(18),
|
||||
2,
|
||||
);
|
||||
}
|
||||
bytes.put_slice(&buf);
|
||||
} else if n < 1000 {
|
||||
let mut buf: [u8; 23] = [
|
||||
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
|
||||
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'0', b'\r',
|
||||
b'\n',
|
||||
];
|
||||
// decode 2 more chars, if > 2 chars
|
||||
let d1 = (n % 100) << 1;
|
||||
n /= 100;
|
||||
unsafe {
|
||||
ptr::copy_nonoverlapping(
|
||||
DEC_DIGITS_LUT.as_ptr().add(d1),
|
||||
buf.as_mut_ptr().offset(19),
|
||||
2,
|
||||
)
|
||||
};
|
||||
|
||||
// decode last 1
|
||||
buf[18] = (n as u8) + b'0';
|
||||
|
||||
bytes.put_slice(&buf);
|
||||
} else {
|
||||
bytes.put_slice(b"\r\ncontent-length: ");
|
||||
convert_usize(n, bytes);
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn convert_usize(mut n: usize, bytes: &mut BytesMut) {
|
||||
let mut curr: isize = 39;
|
||||
let mut buf: [u8; 41] = unsafe { mem::MaybeUninit::uninit().assume_init() };
|
||||
buf[39] = b'\r';
|
||||
buf[40] = b'\n';
|
||||
let buf_ptr = buf.as_mut_ptr();
|
||||
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
|
||||
|
||||
// eagerly decode 4 characters at a time
|
||||
while n >= 10_000 {
|
||||
let rem = (n % 10_000) as isize;
|
||||
n /= 10_000;
|
||||
|
||||
let d1 = (rem / 100) << 1;
|
||||
let d2 = (rem % 100) << 1;
|
||||
curr -= 4;
|
||||
unsafe {
|
||||
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
|
||||
ptr::copy_nonoverlapping(
|
||||
lut_ptr.offset(d2),
|
||||
buf_ptr.offset(curr + 2),
|
||||
2,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// if we reach here numbers are <= 9999, so at most 4 chars long
|
||||
let mut n = n as isize; // possibly reduce 64bit math
|
||||
|
||||
// decode 2 more chars, if > 2 chars
|
||||
if n >= 100 {
|
||||
let d1 = (n % 100) << 1;
|
||||
n /= 100;
|
||||
curr -= 2;
|
||||
unsafe {
|
||||
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
|
||||
}
|
||||
}
|
||||
|
||||
// decode last 1 or 2 chars
|
||||
if n < 10 {
|
||||
curr -= 1;
|
||||
unsafe {
|
||||
*buf_ptr.offset(curr) = (n as u8) + b'0';
|
||||
}
|
||||
} else {
|
||||
let d1 = n << 1;
|
||||
curr -= 2;
|
||||
unsafe {
|
||||
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
|
||||
}
|
||||
}
|
||||
|
||||
unsafe {
|
||||
bytes.extend_from_slice(slice::from_raw_parts(
|
||||
buf_ptr.offset(curr),
|
||||
41 - curr as usize,
|
||||
));
|
||||
}
|
||||
}
|
||||
}
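A sanity check is worth noting here (this sketch is not part of the benchmark file): since the three modules above implement the same header-writing routine, their output can be compared directly for any length. The helper name is hypothetical.

```rust
use bytes::BytesMut;

// Hypothetical helper placed alongside the benchmark modules above:
// assert that the unsafe, safe, and itoa-based writers emit identical bytes.
fn assert_same_output(n: usize) {
    let mut original = BytesMut::with_capacity(35);
    let mut new = BytesMut::with_capacity(35);
    let mut itoa_buf = BytesMut::with_capacity(35);

    _original::write_content_length(n, &mut original);
    _new::write_content_length(n, &mut new);
    _itoa::write_content_length(n, &mut itoa_buf);

    assert_eq!(original, new);
    assert_eq!(new, itoa_buf);
}
```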
|
||||
222
actix-http/benches/status-line.rs
Normal file
@@ -0,0 +1,222 @@
|
||||
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
|
||||
|
||||
use bytes::BytesMut;
|
||||
use http::Version;
|
||||
|
||||
const CODES: &[u16] = &[201, 303, 404, 515];
|
||||
|
||||
fn bench_write_status_line_11(c: &mut Criterion) {
|
||||
let mut group = c.benchmark_group("write_status_line v1.1");
|
||||
|
||||
let version = Version::HTTP_11;
|
||||
|
||||
for i in CODES.iter() {
|
||||
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
|
||||
b.iter(|| {
|
||||
let mut b = BytesMut::with_capacity(35);
|
||||
_original::write_status_line(version, i, &mut b);
|
||||
})
|
||||
});
|
||||
|
||||
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
|
||||
b.iter(|| {
|
||||
let mut b = BytesMut::with_capacity(35);
|
||||
_new::write_status_line(version, i, &mut b);
|
||||
})
|
||||
});
|
||||
|
||||
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
|
||||
b.iter(|| {
|
||||
let mut b = BytesMut::with_capacity(35);
|
||||
_naive::write_status_line(version, i, &mut b);
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
fn bench_write_status_line_10(c: &mut Criterion) {
|
||||
let mut group = c.benchmark_group("write_status_line v1.0");
|
||||
|
||||
let version = Version::HTTP_10;
|
||||
|
||||
for i in CODES.iter() {
|
||||
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
|
||||
b.iter(|| {
|
||||
let mut b = BytesMut::with_capacity(35);
|
||||
_original::write_status_line(version, i, &mut b);
|
||||
})
|
||||
});
|
||||
|
||||
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
|
||||
b.iter(|| {
|
||||
let mut b = BytesMut::with_capacity(35);
|
||||
_new::write_status_line(version, i, &mut b);
|
||||
})
|
||||
});
|
||||
|
||||
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
|
||||
b.iter(|| {
|
||||
let mut b = BytesMut::with_capacity(35);
|
||||
_naive::write_status_line(version, i, &mut b);
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
fn bench_write_status_line_09(c: &mut Criterion) {
|
||||
let mut group = c.benchmark_group("write_status_line v0.9");
|
||||
|
||||
let version = Version::HTTP_09;
|
||||
|
||||
for i in CODES.iter() {
|
||||
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
|
||||
b.iter(|| {
|
||||
let mut b = BytesMut::with_capacity(35);
|
||||
_original::write_status_line(version, i, &mut b);
|
||||
})
|
||||
});
|
||||
|
||||
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
|
||||
b.iter(|| {
|
||||
let mut b = BytesMut::with_capacity(35);
|
||||
_new::write_status_line(version, i, &mut b);
|
||||
})
|
||||
});
|
||||
|
||||
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
|
||||
b.iter(|| {
|
||||
let mut b = BytesMut::with_capacity(35);
|
||||
_naive::write_status_line(version, i, &mut b);
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
criterion_group!(
|
||||
benches,
|
||||
bench_write_status_line_11,
|
||||
bench_write_status_line_10,
|
||||
bench_write_status_line_09
|
||||
);
|
||||
criterion_main!(benches);
|
||||
|
||||
mod _naive {
|
||||
use bytes::{BufMut, BytesMut};
|
||||
use http::Version;
|
||||
|
||||
pub(crate) fn write_status_line(version: Version, n: u16, bytes: &mut BytesMut) {
|
||||
match version {
|
||||
Version::HTTP_11 => bytes.put_slice(b"HTTP/1.1 "),
|
||||
Version::HTTP_10 => bytes.put_slice(b"HTTP/1.0 "),
|
||||
Version::HTTP_09 => bytes.put_slice(b"HTTP/0.9 "),
|
||||
_ => {
|
||||
// other HTTP version handlers do not use this method
|
||||
}
|
||||
}
|
||||
|
||||
bytes.put_slice(n.to_string().as_bytes());
|
||||
}
|
||||
}
|
||||
|
||||
mod _new {
|
||||
use bytes::{BufMut, BytesMut};
|
||||
use http::Version;
|
||||
|
||||
const DIGITS_START: u8 = b'0';
|
||||
|
||||
pub(crate) fn write_status_line(version: Version, n: u16, bytes: &mut BytesMut) {
|
||||
match version {
|
||||
Version::HTTP_11 => bytes.put_slice(b"HTTP/1.1 "),
|
||||
Version::HTTP_10 => bytes.put_slice(b"HTTP/1.0 "),
|
||||
Version::HTTP_09 => bytes.put_slice(b"HTTP/0.9 "),
|
||||
_ => {
|
||||
// other HTTP version handlers do not use this method
|
||||
}
|
||||
}
|
||||
|
||||
let d100 = (n / 100) as u8;
|
||||
let d10 = ((n / 10) % 10) as u8;
|
||||
let d1 = (n % 10) as u8;
|
||||
|
||||
bytes.put_u8(DIGITS_START + d100);
|
||||
bytes.put_u8(DIGITS_START + d10);
|
||||
bytes.put_u8(DIGITS_START + d1);
|
||||
|
||||
bytes.put_u8(b' ');
|
||||
}
|
||||
}
|
||||
|
||||
mod _original {
|
||||
use std::ptr;
|
||||
|
||||
use bytes::{BufMut, BytesMut};
|
||||
use http::Version;
|
||||
|
||||
const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\
|
||||
2021222324252627282930313233343536373839\
|
||||
4041424344454647484950515253545556575859\
|
||||
6061626364656667686970717273747576777879\
|
||||
8081828384858687888990919293949596979899";
|
||||
|
||||
pub(crate) const STATUS_LINE_BUF_SIZE: usize = 13;
|
||||
|
||||
pub(crate) fn write_status_line(version: Version, mut n: u16, bytes: &mut BytesMut) {
|
||||
let mut buf: [u8; STATUS_LINE_BUF_SIZE] = *b"HTTP/1.1 ";
|
||||
|
||||
match version {
|
||||
Version::HTTP_2 => buf[5] = b'2',
|
||||
Version::HTTP_10 => buf[7] = b'0',
|
||||
Version::HTTP_09 => {
|
||||
buf[5] = b'0';
|
||||
buf[7] = b'9';
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
|
||||
let mut curr: isize = 12;
|
||||
let buf_ptr = buf.as_mut_ptr();
|
||||
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
|
||||
let four = n > 999;
|
||||
|
||||
// decode 2 more chars, if > 2 chars
|
||||
let d1 = (n % 100) << 1;
|
||||
n /= 100;
|
||||
curr -= 2;
|
||||
unsafe {
|
||||
ptr::copy_nonoverlapping(
|
||||
lut_ptr.offset(d1 as isize),
|
||||
buf_ptr.offset(curr),
|
||||
2,
|
||||
);
|
||||
}
|
||||
|
||||
// decode last 1 or 2 chars
|
||||
if n < 10 {
|
||||
curr -= 1;
|
||||
unsafe {
|
||||
*buf_ptr.offset(curr) = (n as u8) + b'0';
|
||||
}
|
||||
} else {
|
||||
let d1 = n << 1;
|
||||
curr -= 2;
|
||||
unsafe {
|
||||
ptr::copy_nonoverlapping(
|
||||
lut_ptr.offset(d1 as isize),
|
||||
buf_ptr.offset(curr),
|
||||
2,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
bytes.put_slice(&buf);
|
||||
if four {
|
||||
bytes.put_u8(b' ');
|
||||
}
|
||||
}
|
||||
}
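As with the content-length benchmark, the unsafe and safe writers above can be checked against each other (a sketch only, not part of the file); note that the naive writer omits the trailing space the other two emit after the status code, so it is left out of the comparison.

```rust
use bytes::BytesMut;
use http::Version;

// Hypothetical check that the unsafe and safe status-line writers agree.
fn assert_same_status_line(code: u16) {
    let mut original = BytesMut::with_capacity(35);
    let mut new = BytesMut::with_capacity(35);

    _original::write_status_line(Version::HTTP_11, code, &mut original);
    _new::write_status_line(Version::HTTP_11, code, &mut new);

    assert_eq!(original, new);
}
```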
|
||||
137
actix-http/benches/uninit-headers.rs
Normal file
@@ -0,0 +1,137 @@
|
||||
use criterion::{criterion_group, criterion_main, Criterion};
|
||||
|
||||
use bytes::BytesMut;
|
||||
|
||||
// A Miri run detects UB, seen on this playground:
|
||||
// https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=f5d9aa166aa48df8dca05fce2b6c3915
|
||||
|
||||
fn bench_header_parsing(c: &mut Criterion) {
|
||||
c.bench_function("Original (Unsound) [short]", |b| {
|
||||
b.iter(|| {
|
||||
let mut buf = BytesMut::from(REQ_SHORT);
|
||||
_original::parse_headers(&mut buf);
|
||||
})
|
||||
});
|
||||
|
||||
c.bench_function("New (safe) [short]", |b| {
|
||||
b.iter(|| {
|
||||
let mut buf = BytesMut::from(REQ_SHORT);
|
||||
_new::parse_headers(&mut buf);
|
||||
})
|
||||
});
|
||||
|
||||
c.bench_function("Original (Unsound) [realistic]", |b| {
|
||||
b.iter(|| {
|
||||
let mut buf = BytesMut::from(REQ);
|
||||
_original::parse_headers(&mut buf);
|
||||
})
|
||||
});
|
||||
|
||||
c.bench_function("New (safe) [realistic]", |b| {
|
||||
b.iter(|| {
|
||||
let mut buf = BytesMut::from(REQ);
|
||||
_new::parse_headers(&mut buf);
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
criterion_group!(benches, bench_header_parsing);
|
||||
criterion_main!(benches);
|
||||
|
||||
const MAX_HEADERS: usize = 96;
|
||||
|
||||
const EMPTY_HEADER_ARRAY: [httparse::Header<'static>; MAX_HEADERS] =
|
||||
[httparse::EMPTY_HEADER; MAX_HEADERS];
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
struct HeaderIndex {
|
||||
name: (usize, usize),
|
||||
value: (usize, usize),
|
||||
}
|
||||
|
||||
const EMPTY_HEADER_INDEX: HeaderIndex = HeaderIndex {
|
||||
name: (0, 0),
|
||||
value: (0, 0),
|
||||
};
|
||||
|
||||
const EMPTY_HEADER_INDEX_ARRAY: [HeaderIndex; MAX_HEADERS] =
|
||||
[EMPTY_HEADER_INDEX; MAX_HEADERS];
|
||||
|
||||
impl HeaderIndex {
|
||||
fn record(
|
||||
bytes: &[u8],
|
||||
headers: &[httparse::Header<'_>],
|
||||
indices: &mut [HeaderIndex],
|
||||
) {
|
||||
let bytes_ptr = bytes.as_ptr() as usize;
|
||||
for (header, indices) in headers.iter().zip(indices.iter_mut()) {
|
||||
let name_start = header.name.as_ptr() as usize - bytes_ptr;
|
||||
let name_end = name_start + header.name.len();
|
||||
indices.name = (name_start, name_end);
|
||||
let value_start = header.value.as_ptr() as usize - bytes_ptr;
|
||||
let value_end = value_start + header.value.len();
|
||||
indices.value = (value_start, value_end);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// test cases taken from:
|
||||
// https://github.com/seanmonstar/httparse/blob/master/benches/parse.rs
|
||||
|
||||
const REQ_SHORT: &'static [u8] = b"\
|
||||
GET / HTTP/1.0\r\n\
|
||||
Host: example.com\r\n\
|
||||
Cookie: session=60; user_id=1\r\n\r\n";
|
||||
|
||||
const REQ: &'static [u8] = b"\
|
||||
GET /wp-content/uploads/2010/03/hello-kitty-darth-vader-pink.jpg HTTP/1.1\r\n\
|
||||
Host: www.kittyhell.com\r\n\
|
||||
User-Agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; ja-JP-mac; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 Pathtraq/0.9\r\n\
|
||||
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n\
|
||||
Accept-Language: ja,en-us;q=0.7,en;q=0.3\r\n\
|
||||
Accept-Encoding: gzip,deflate\r\n\
|
||||
Accept-Charset: Shift_JIS,utf-8;q=0.7,*;q=0.7\r\n\
|
||||
Keep-Alive: 115\r\n\
|
||||
Connection: keep-alive\r\n\
|
||||
Cookie: wp_ozh_wsa_visits=2; wp_ozh_wsa_visit_lasttime=xxxxxxxxxx; __utma=xxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.x; __utmz=xxxxxxxxx.xxxxxxxxxx.x.x.utmccn=(referral)|utmcsr=reader.livedoor.com|utmcct=/reader/|utmcmd=referral|padding=under256\r\n\r\n";
|
||||
|
||||
mod _new {
|
||||
use super::*;
|
||||
|
||||
pub fn parse_headers(src: &mut BytesMut) -> usize {
|
||||
let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY;
|
||||
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = EMPTY_HEADER_ARRAY;
|
||||
|
||||
let mut req = httparse::Request::new(&mut parsed);
|
||||
match req.parse(src).unwrap() {
|
||||
httparse::Status::Complete(_len) => {
|
||||
HeaderIndex::record(src, req.headers, &mut headers);
|
||||
req.headers.len()
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mod _original {
|
||||
use super::*;
|
||||
|
||||
use std::mem::MaybeUninit;
|
||||
|
||||
pub fn parse_headers(src: &mut BytesMut) -> usize {
|
||||
let mut headers: [HeaderIndex; MAX_HEADERS] =
|
||||
unsafe { MaybeUninit::uninit().assume_init() };
|
||||
|
||||
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] =
|
||||
unsafe { MaybeUninit::uninit().assume_init() };
|
||||
|
||||
let mut req = httparse::Request::new(&mut parsed);
|
||||
match req.parse(src).unwrap() {
|
||||
httparse::Status::Complete(_len) => {
|
||||
HeaderIndex::record(src, req.headers, &mut headers);
|
||||
req.headers.len()
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
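For completeness, a tiny smoke test (not part of the benchmark) can confirm that the safe implementation still parses the bundled requests; `REQ_SHORT` carries a Host and a Cookie header.

```rust
use bytes::BytesMut;

// Hypothetical check using the constants and modules defined above.
fn smoke_test_safe_parser() {
    let mut buf = BytesMut::from(REQ_SHORT);
    assert_eq!(_new::parse_headers(&mut buf), 2); // Host + Cookie
}
```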
|
||||
@@ -1,13 +1,14 @@
|
||||
use std::{env, io};
|
||||
|
||||
use actix_http::{error::PayloadError, HttpService, Request, Response};
|
||||
use actix_http::{Error, HttpService, Request, Response};
|
||||
use actix_server::Server;
|
||||
use bytes::BytesMut;
|
||||
use futures::{Future, Stream};
|
||||
use futures_util::StreamExt;
|
||||
use http::header::HeaderValue;
|
||||
use log::info;
|
||||
|
||||
fn main() -> io::Result<()> {
|
||||
#[actix_rt::main]
|
||||
async fn main() -> io::Result<()> {
|
||||
env::set_var("RUST_LOG", "echo=info");
|
||||
env_logger::init();
|
||||
|
||||
@@ -16,22 +17,21 @@ fn main() -> io::Result<()> {
|
||||
HttpService::build()
|
||||
.client_timeout(1000)
|
||||
.client_disconnect(1000)
|
||||
.finish(|mut req: Request| {
|
||||
req.take_payload()
|
||||
.fold(BytesMut::new(), move |mut body, chunk| {
|
||||
body.extend_from_slice(&chunk);
|
||||
Ok::<_, PayloadError>(body)
|
||||
})
|
||||
.and_then(|bytes| {
|
||||
info!("request body: {:?}", bytes);
|
||||
let mut res = Response::Ok();
|
||||
res.header(
|
||||
"x-head",
|
||||
HeaderValue::from_static("dummy value!"),
|
||||
);
|
||||
Ok(res.body(bytes))
|
||||
})
|
||||
.finish(|mut req: Request| async move {
|
||||
let mut body = BytesMut::new();
|
||||
while let Some(item) = req.payload().next().await {
|
||||
body.extend_from_slice(&item?);
|
||||
}
|
||||
|
||||
info!("request body: {:?}", body);
|
||||
Ok::<_, Error>(
|
||||
Response::Ok()
|
||||
.header("x-head", HeaderValue::from_static("dummy value!"))
|
||||
.body(body),
|
||||
)
|
||||
})
|
||||
.tcp()
|
||||
})?
|
||||
.run()
|
||||
.await
|
||||
}
|
||||
|
||||
@@ -1,34 +1,33 @@
|
||||
use std::{env, io};
|
||||
|
||||
use actix_http::http::HeaderValue;
|
||||
use actix_http::{error::PayloadError, Error, HttpService, Request, Response};
|
||||
use actix_http::{Error, HttpService, Request, Response};
|
||||
use actix_server::Server;
|
||||
use bytes::BytesMut;
|
||||
use futures::{Future, Stream};
|
||||
use futures_util::StreamExt;
|
||||
use log::info;
|
||||
|
||||
fn handle_request(mut req: Request) -> impl Future<Item = Response, Error = Error> {
|
||||
req.take_payload()
|
||||
.fold(BytesMut::new(), move |mut body, chunk| {
|
||||
body.extend_from_slice(&chunk);
|
||||
Ok::<_, PayloadError>(body)
|
||||
})
|
||||
.from_err()
|
||||
.and_then(|bytes| {
|
||||
info!("request body: {:?}", bytes);
|
||||
let mut res = Response::Ok();
|
||||
res.header("x-head", HeaderValue::from_static("dummy value!"));
|
||||
Ok(res.body(bytes))
|
||||
})
|
||||
async fn handle_request(mut req: Request) -> Result<Response, Error> {
|
||||
let mut body = BytesMut::new();
|
||||
while let Some(item) = req.payload().next().await {
|
||||
body.extend_from_slice(&item?)
|
||||
}
|
||||
|
||||
info!("request body: {:?}", body);
|
||||
Ok(Response::Ok()
|
||||
.header("x-head", HeaderValue::from_static("dummy value!"))
|
||||
.body(body))
|
||||
}
|
||||
|
||||
fn main() -> io::Result<()> {
|
||||
#[actix_rt::main]
|
||||
async fn main() -> io::Result<()> {
|
||||
env::set_var("RUST_LOG", "echo=info");
|
||||
env_logger::init();
|
||||
|
||||
Server::build()
|
||||
.bind("echo", "127.0.0.1:8080", || {
|
||||
HttpService::build().finish(|_req: Request| handle_request(_req))
|
||||
HttpService::build().finish(handle_request).tcp()
|
||||
})?
|
||||
.run()
|
||||
.await
|
||||
}
|
||||
|
||||
@@ -2,11 +2,12 @@ use std::{env, io};
|
||||
|
||||
use actix_http::{HttpService, Response};
|
||||
use actix_server::Server;
|
||||
use futures::future;
|
||||
use futures_util::future;
|
||||
use http::header::HeaderValue;
|
||||
use log::info;
|
||||
|
||||
fn main() -> io::Result<()> {
|
||||
#[actix_rt::main]
|
||||
async fn main() -> io::Result<()> {
|
||||
env::set_var("RUST_LOG", "hello_world=info");
|
||||
env_logger::init();
|
||||
|
||||
@@ -21,6 +22,8 @@ fn main() -> io::Result<()> {
|
||||
res.header("x-head", HeaderValue::from_static("dummy value!"));
|
||||
future::ok::<_, ()>(res.body("Hello world!"))
|
||||
})
|
||||
.tcp()
|
||||
})?
|
||||
.run()
|
||||
.await
|
||||
}
|
||||
|
||||
@@ -1,8 +1,12 @@
|
||||
use std::marker::PhantomData;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use std::{fmt, mem};
|
||||
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use futures::{Async, Poll, Stream};
|
||||
use futures_core::Stream;
|
||||
use futures_util::ready;
|
||||
use pin_project::pin_project;
|
||||
|
||||
use crate::error::Error;
|
||||
|
||||
@@ -11,53 +15,60 @@ use crate::error::Error;
|
||||
pub enum BodySize {
|
||||
None,
|
||||
Empty,
|
||||
Sized(usize),
|
||||
Sized64(u64),
|
||||
Sized(u64),
|
||||
Stream,
|
||||
}
|
||||
|
||||
impl BodySize {
|
||||
pub fn is_eof(&self) -> bool {
|
||||
match self {
|
||||
BodySize::None
|
||||
| BodySize::Empty
|
||||
| BodySize::Sized(0)
|
||||
| BodySize::Sized64(0) => true,
|
||||
_ => false,
|
||||
}
|
||||
matches!(self, BodySize::None | BodySize::Empty | BodySize::Sized(0))
|
||||
}
|
||||
}
|
||||
|
||||
/// Type that provides this trait can be streamed to a peer.
|
||||
pub trait MessageBody {
|
||||
fn length(&self) -> BodySize;
|
||||
fn size(&self) -> BodySize;
|
||||
|
||||
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error>;
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>>;
|
||||
|
||||
downcast_get_type_id!();
|
||||
}
|
||||
|
||||
downcast!(MessageBody);
|
||||
|
||||
impl MessageBody for () {
|
||||
fn length(&self) -> BodySize {
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Empty
|
||||
}
|
||||
|
||||
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
|
||||
Ok(Async::Ready(None))
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
_: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
Poll::Ready(None)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: MessageBody> MessageBody for Box<T> {
|
||||
fn length(&self) -> BodySize {
|
||||
self.as_ref().length()
|
||||
impl<T: MessageBody + Unpin> MessageBody for Box<T> {
|
||||
fn size(&self) -> BodySize {
|
||||
self.as_ref().size()
|
||||
}
|
||||
|
||||
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
|
||||
self.as_mut().poll_next()
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
Pin::new(self.get_mut().as_mut()).poll_next(cx)
|
||||
}
|
||||
}
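The new signature means a body is now driven with a pinned receiver and a task context, mirroring `futures_core::Stream`. A minimal way to drain any body into a single buffer, assuming the actix-http API shown in this diff, is sketched below; the same `poll_fn` plus `Pin::new` pattern appears in the tests later in this file. The helper name is hypothetical.

```rust
use std::pin::Pin;

use actix_http::body::MessageBody;
use actix_http::Error;
use bytes::{Bytes, BytesMut};
use futures_util::future::poll_fn;

// Hypothetical helper: collect every chunk produced by a MessageBody.
async fn collect_body<B: MessageBody + Unpin>(mut body: B) -> Result<Bytes, Error> {
    let mut buf = BytesMut::new();
    while let Some(chunk) = poll_fn(|cx| Pin::new(&mut body).poll_next(cx)).await {
        buf.extend_from_slice(&chunk?);
    }
    Ok(buf.freeze())
}
```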
|
||||
|
||||
#[pin_project(project = ResponseBodyProj)]
|
||||
pub enum ResponseBody<B> {
|
||||
Body(B),
|
||||
Other(Body),
|
||||
Body(#[pin] B),
|
||||
Other(#[pin] Body),
|
||||
}
|
||||
|
||||
impl ResponseBody<Body> {
|
||||
@@ -86,30 +97,39 @@ impl<B: MessageBody> ResponseBody<B> {
|
||||
}
|
||||
|
||||
impl<B: MessageBody> MessageBody for ResponseBody<B> {
|
||||
fn length(&self) -> BodySize {
|
||||
fn size(&self) -> BodySize {
|
||||
match self {
|
||||
ResponseBody::Body(ref body) => body.length(),
|
||||
ResponseBody::Other(ref body) => body.length(),
|
||||
ResponseBody::Body(ref body) => body.size(),
|
||||
ResponseBody::Other(ref body) => body.size(),
|
||||
}
|
||||
}
|
||||
|
||||
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
|
||||
match self {
|
||||
ResponseBody::Body(ref mut body) => body.poll_next(),
|
||||
ResponseBody::Other(ref mut body) => body.poll_next(),
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
match self.project() {
|
||||
ResponseBodyProj::Body(body) => body.poll_next(cx),
|
||||
ResponseBodyProj::Other(body) => body.poll_next(cx),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<B: MessageBody> Stream for ResponseBody<B> {
|
||||
type Item = Bytes;
|
||||
type Error = Error;
|
||||
type Item = Result<Bytes, Error>;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
self.poll_next()
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Self::Item>> {
|
||||
match self.project() {
|
||||
ResponseBodyProj::Body(body) => body.poll_next(cx),
|
||||
ResponseBodyProj::Other(body) => body.poll_next(cx),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[pin_project(project = BodyProj)]
|
||||
/// Represents various types of http message body.
|
||||
pub enum Body {
|
||||
/// Empty response. `Content-Length` header is not set.
|
||||
@@ -119,44 +139,47 @@ pub enum Body {
|
||||
/// Specific response body.
|
||||
Bytes(Bytes),
|
||||
/// Generic message body.
|
||||
Message(Box<dyn MessageBody>),
|
||||
Message(Box<dyn MessageBody + Unpin>),
|
||||
}
|
||||
|
||||
impl Body {
|
||||
/// Create body from slice (copy)
|
||||
pub fn from_slice(s: &[u8]) -> Body {
|
||||
Body::Bytes(Bytes::from(s))
|
||||
Body::Bytes(Bytes::copy_from_slice(s))
|
||||
}
|
||||
|
||||
/// Create body from generic message body.
|
||||
pub fn from_message<B: MessageBody + 'static>(body: B) -> Body {
|
||||
pub fn from_message<B: MessageBody + Unpin + 'static>(body: B) -> Body {
|
||||
Body::Message(Box::new(body))
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageBody for Body {
|
||||
fn length(&self) -> BodySize {
|
||||
fn size(&self) -> BodySize {
|
||||
match self {
|
||||
Body::None => BodySize::None,
|
||||
Body::Empty => BodySize::Empty,
|
||||
Body::Bytes(ref bin) => BodySize::Sized(bin.len()),
|
||||
Body::Message(ref body) => body.length(),
|
||||
Body::Bytes(ref bin) => BodySize::Sized(bin.len() as u64),
|
||||
Body::Message(ref body) => body.size(),
|
||||
}
|
||||
}
|
||||
|
||||
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
|
||||
match self {
|
||||
Body::None => Ok(Async::Ready(None)),
|
||||
Body::Empty => Ok(Async::Ready(None)),
|
||||
Body::Bytes(ref mut bin) => {
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
match self.project() {
|
||||
BodyProj::None => Poll::Ready(None),
|
||||
BodyProj::Empty => Poll::Ready(None),
|
||||
BodyProj::Bytes(ref mut bin) => {
|
||||
let len = bin.len();
|
||||
if len == 0 {
|
||||
Ok(Async::Ready(None))
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
Ok(Async::Ready(Some(mem::replace(bin, Bytes::new()))))
|
||||
Poll::Ready(Some(Ok(mem::take(bin))))
|
||||
}
|
||||
}
|
||||
Body::Message(ref mut body) => body.poll_next(),
|
||||
BodyProj::Message(ref mut body) => Pin::new(body.as_mut()).poll_next(cx),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -164,14 +187,8 @@ impl MessageBody for Body {
|
||||
impl PartialEq for Body {
|
||||
fn eq(&self, other: &Body) -> bool {
|
||||
match *self {
|
||||
Body::None => match *other {
|
||||
Body::None => true,
|
||||
_ => false,
|
||||
},
|
||||
Body::Empty => match *other {
|
||||
Body::Empty => true,
|
||||
_ => false,
|
||||
},
|
||||
Body::None => matches!(*other, Body::None),
|
||||
Body::Empty => matches!(*other, Body::Empty),
|
||||
Body::Bytes(ref b) => match *other {
|
||||
Body::Bytes(ref b2) => b == b2,
|
||||
_ => false,
|
||||
@@ -182,10 +199,10 @@ impl PartialEq for Body {
|
||||
}
|
||||
|
||||
impl fmt::Debug for Body {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
Body::None => write!(f, "Body::None"),
|
||||
Body::Empty => write!(f, "Body::Zero"),
|
||||
Body::Empty => write!(f, "Body::Empty"),
|
||||
Body::Bytes(ref b) => write!(f, "Body::Bytes({:?})", b),
|
||||
Body::Message(_) => write!(f, "Body::Message(_)"),
|
||||
}
|
||||
@@ -218,7 +235,7 @@ impl From<String> for Body {
|
||||
|
||||
impl<'a> From<&'a String> for Body {
|
||||
fn from(s: &'a String) -> Body {
|
||||
Body::Bytes(Bytes::from(AsRef::<[u8]>::as_ref(&s)))
|
||||
Body::Bytes(Bytes::copy_from_slice(AsRef::<[u8]>::as_ref(&s)))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -234,96 +251,115 @@ impl From<BytesMut> for Body {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<serde_json::Value> for Body {
|
||||
fn from(v: serde_json::Value) -> Body {
|
||||
Body::Bytes(v.to_string().into())
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> From<SizedStream<S>> for Body
|
||||
where
|
||||
S: Stream<Item = Result<Bytes, Error>> + Unpin + 'static,
|
||||
{
|
||||
fn from(s: SizedStream<S>) -> Body {
|
||||
Body::from_message(s)
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, E> From<BodyStream<S, E>> for Body
|
||||
where
|
||||
S: Stream<Item = Result<Bytes, E>> + Unpin + 'static,
|
||||
E: Into<Error> + 'static,
|
||||
{
|
||||
fn from(s: BodyStream<S, E>) -> Body {
|
||||
Body::from_message(s)
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageBody for Bytes {
|
||||
fn length(&self) -> BodySize {
|
||||
BodySize::Sized(self.len())
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Sized(self.len() as u64)
|
||||
}
|
||||
|
||||
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
_: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
if self.is_empty() {
|
||||
Ok(Async::Ready(None))
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
Ok(Async::Ready(Some(mem::replace(self, Bytes::new()))))
|
||||
Poll::Ready(Some(Ok(mem::take(self.get_mut()))))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageBody for BytesMut {
|
||||
fn length(&self) -> BodySize {
|
||||
BodySize::Sized(self.len())
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Sized(self.len() as u64)
|
||||
}
|
||||
|
||||
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
_: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
if self.is_empty() {
|
||||
Ok(Async::Ready(None))
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
Ok(Async::Ready(Some(
|
||||
mem::replace(self, BytesMut::new()).freeze(),
|
||||
)))
|
||||
Poll::Ready(Some(Ok(mem::take(self.get_mut()).freeze())))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageBody for &'static str {
|
||||
fn length(&self) -> BodySize {
|
||||
BodySize::Sized(self.len())
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Sized(self.len() as u64)
|
||||
}
|
||||
|
||||
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
_: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
if self.is_empty() {
|
||||
Ok(Async::Ready(None))
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
Ok(Async::Ready(Some(Bytes::from_static(
|
||||
mem::replace(self, "").as_ref(),
|
||||
Poll::Ready(Some(Ok(Bytes::from_static(
|
||||
mem::take(self.get_mut()).as_ref(),
|
||||
))))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageBody for &'static [u8] {
|
||||
fn length(&self) -> BodySize {
|
||||
BodySize::Sized(self.len())
|
||||
}
|
||||
|
||||
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
|
||||
if self.is_empty() {
|
||||
Ok(Async::Ready(None))
|
||||
} else {
|
||||
Ok(Async::Ready(Some(Bytes::from_static(mem::replace(
|
||||
self, b"",
|
||||
)))))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageBody for Vec<u8> {
|
||||
fn length(&self) -> BodySize {
|
||||
BodySize::Sized(self.len())
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Sized(self.len() as u64)
|
||||
}
|
||||
|
||||
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
_: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
if self.is_empty() {
|
||||
Ok(Async::Ready(None))
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
Ok(Async::Ready(Some(Bytes::from(mem::replace(
|
||||
self,
|
||||
Vec::new(),
|
||||
)))))
|
||||
Poll::Ready(Some(Ok(Bytes::from(mem::take(self.get_mut())))))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageBody for String {
|
||||
fn length(&self) -> BodySize {
|
||||
BodySize::Sized(self.len())
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Sized(self.len() as u64)
|
||||
}
|
||||
|
||||
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
_: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
if self.is_empty() {
|
||||
Ok(Async::Ready(None))
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
Ok(Async::Ready(Some(Bytes::from(
|
||||
mem::replace(self, String::new()).into_bytes(),
|
||||
Poll::Ready(Some(Ok(Bytes::from(
|
||||
mem::take(self.get_mut()).into_bytes(),
|
||||
))))
|
||||
}
|
||||
}
|
||||
@@ -331,14 +367,16 @@ impl MessageBody for String {
|
||||
|
||||
/// Type represent streaming body.
|
||||
/// Response does not contain `content-length` header and appropriate transfer encoding is used.
|
||||
pub struct BodyStream<S, E> {
|
||||
#[pin_project]
|
||||
pub struct BodyStream<S: Unpin, E> {
|
||||
#[pin]
|
||||
stream: S,
|
||||
_t: PhantomData<E>,
|
||||
}
|
||||
|
||||
impl<S, E> BodyStream<S, E>
|
||||
where
|
||||
S: Stream<Item = Bytes, Error = E>,
|
||||
S: Stream<Item = Result<Bytes, E>> + Unpin,
|
||||
E: Into<Error>,
|
||||
{
|
||||
pub fn new(stream: S) -> Self {
|
||||
@@ -351,50 +389,85 @@ where
|
||||
|
||||
impl<S, E> MessageBody for BodyStream<S, E>
|
||||
where
|
||||
S: Stream<Item = Bytes, Error = E>,
|
||||
S: Stream<Item = Result<Bytes, E>> + Unpin,
|
||||
E: Into<Error>,
|
||||
{
|
||||
fn length(&self) -> BodySize {
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Stream
|
||||
}
|
||||
|
||||
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
|
||||
self.stream.poll().map_err(std::convert::Into::into)
|
||||
/// Attempts to pull out the next value of the underlying [`Stream`].
|
||||
///
|
||||
/// Empty values are skipped to prevent [`BodyStream`]'s transmission being
|
||||
/// ended on a zero-length chunk, but rather proceed until the underlying
|
||||
/// [`Stream`] ends.
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
let mut stream = self.project().stream;
|
||||
loop {
|
||||
let stream = stream.as_mut();
|
||||
return Poll::Ready(match ready!(stream.poll_next(cx)) {
|
||||
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
|
||||
opt => opt.map(|res| res.map_err(Into::into)),
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Type represent streaming body. This body implementation should be used
|
||||
/// if total size of stream is known. Data get sent as is without using transfer encoding.
|
||||
pub struct SizedStream<S> {
|
||||
size: usize,
|
||||
#[pin_project]
|
||||
pub struct SizedStream<S: Unpin> {
|
||||
size: u64,
|
||||
#[pin]
|
||||
stream: S,
|
||||
}
|
||||
|
||||
impl<S> SizedStream<S>
|
||||
where
|
||||
S: Stream<Item = Bytes, Error = Error>,
|
||||
S: Stream<Item = Result<Bytes, Error>> + Unpin,
|
||||
{
|
||||
pub fn new(size: usize, stream: S) -> Self {
|
||||
pub fn new(size: u64, stream: S) -> Self {
|
||||
SizedStream { size, stream }
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> MessageBody for SizedStream<S>
|
||||
where
|
||||
S: Stream<Item = Bytes, Error = Error>,
|
||||
S: Stream<Item = Result<Bytes, Error>> + Unpin,
|
||||
{
|
||||
fn length(&self) -> BodySize {
|
||||
BodySize::Sized(self.size)
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Sized(self.size as u64)
|
||||
}
|
||||
|
||||
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
|
||||
self.stream.poll()
|
||||
/// Attempts to pull out the next value of the underlying [`Stream`].
|
||||
///
|
||||
/// Empty values are skipped to prevent [`SizedStream`]'s transmission being
|
||||
/// ended on a zero-length chunk, but rather proceed until the underlying
|
||||
/// [`Stream`] ends.
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
let mut stream: Pin<&mut S> = self.project().stream;
|
||||
loop {
|
||||
let stream = stream.as_mut();
|
||||
return Poll::Ready(match ready!(stream.poll_next(cx)) {
|
||||
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
|
||||
val => val,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
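Putting the stream bodies together: a hedged sketch of responding with a `SizedStream` when the total length is known up front (so the response carries a plain length rather than a transfer encoding, per the doc comment above), using the conversions defined earlier in this file. The function name and chunk contents are illustrative.

```rust
use actix_http::body::SizedStream;
use actix_http::{Error, Response};
use bytes::Bytes;
use futures_util::stream;

// Hypothetical handler body: the stream's total size (11 bytes) is declared up front.
fn stream_response() -> Response {
    let chunks = stream::iter(vec![
        Ok::<_, Error>(Bytes::from_static(b"hello ")),
        Ok::<_, Error>(Bytes::from_static(b"world")),
    ]);
    Response::Ok().body(SizedStream::new(11, chunks))
}
```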
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use futures_util::future::poll_fn;
|
||||
use futures_util::pin_mut;
|
||||
use futures_util::stream;
|
||||
|
||||
impl Body {
|
||||
pub(crate) fn get_ref(&self) -> &[u8] {
|
||||
@@ -414,49 +487,237 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_static_str() {
|
||||
assert_eq!(Body::from("").length(), BodySize::Sized(0));
|
||||
assert_eq!(Body::from("test").length(), BodySize::Sized(4));
|
||||
#[actix_rt::test]
|
||||
async fn test_static_str() {
|
||||
assert_eq!(Body::from("").size(), BodySize::Sized(0));
|
||||
assert_eq!(Body::from("test").size(), BodySize::Sized(4));
|
||||
assert_eq!(Body::from("test").get_ref(), b"test");
|
||||
|
||||
assert_eq!("test".size(), BodySize::Sized(4));
|
||||
assert_eq!(
|
||||
poll_fn(|cx| Pin::new(&mut "test").poll_next(cx))
|
||||
.await
|
||||
.unwrap()
|
||||
.ok(),
|
||||
Some(Bytes::from("test"))
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_static_bytes() {
|
||||
assert_eq!(Body::from(b"test".as_ref()).length(), BodySize::Sized(4));
|
||||
#[actix_rt::test]
|
||||
async fn test_static_bytes() {
|
||||
assert_eq!(Body::from(b"test".as_ref()).size(), BodySize::Sized(4));
|
||||
assert_eq!(Body::from(b"test".as_ref()).get_ref(), b"test");
|
||||
assert_eq!(
|
||||
Body::from_slice(b"test".as_ref()).length(),
|
||||
Body::from_slice(b"test".as_ref()).size(),
|
||||
BodySize::Sized(4)
|
||||
);
|
||||
assert_eq!(Body::from_slice(b"test".as_ref()).get_ref(), b"test");
|
||||
let sb = Bytes::from(&b"test"[..]);
|
||||
pin_mut!(sb);
|
||||
|
||||
assert_eq!(sb.size(), BodySize::Sized(4));
|
||||
assert_eq!(
|
||||
poll_fn(|cx| sb.as_mut().poll_next(cx)).await.unwrap().ok(),
|
||||
Some(Bytes::from("test"))
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_vec() {
|
||||
assert_eq!(Body::from(Vec::from("test")).length(), BodySize::Sized(4));
|
||||
#[actix_rt::test]
|
||||
async fn test_vec() {
|
||||
assert_eq!(Body::from(Vec::from("test")).size(), BodySize::Sized(4));
|
||||
assert_eq!(Body::from(Vec::from("test")).get_ref(), b"test");
|
||||
let test_vec = Vec::from("test");
|
||||
pin_mut!(test_vec);
|
||||
|
||||
assert_eq!(test_vec.size(), BodySize::Sized(4));
|
||||
assert_eq!(
|
||||
poll_fn(|cx| test_vec.as_mut().poll_next(cx))
|
||||
.await
|
||||
.unwrap()
|
||||
.ok(),
|
||||
Some(Bytes::from("test"))
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bytes() {
|
||||
assert_eq!(Body::from(Bytes::from("test")).length(), BodySize::Sized(4));
|
||||
assert_eq!(Body::from(Bytes::from("test")).get_ref(), b"test");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_string() {
|
||||
let b = "test".to_owned();
|
||||
assert_eq!(Body::from(b.clone()).length(), BodySize::Sized(4));
|
||||
#[actix_rt::test]
|
||||
async fn test_bytes() {
|
||||
let b = Bytes::from("test");
|
||||
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
|
||||
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
|
||||
assert_eq!(Body::from(&b).length(), BodySize::Sized(4));
|
||||
assert_eq!(Body::from(&b).get_ref(), b"test");
|
||||
pin_mut!(b);
|
||||
|
||||
assert_eq!(b.size(), BodySize::Sized(4));
|
||||
assert_eq!(
|
||||
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
|
||||
Some(Bytes::from("test"))
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bytes_mut() {
|
||||
#[actix_rt::test]
|
||||
async fn test_bytes_mut() {
|
||||
let b = BytesMut::from("test");
|
||||
assert_eq!(Body::from(b.clone()).length(), BodySize::Sized(4));
|
||||
assert_eq!(Body::from(b).get_ref(), b"test");
|
||||
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
|
||||
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
|
||||
pin_mut!(b);
|
||||
|
||||
assert_eq!(b.size(), BodySize::Sized(4));
|
||||
assert_eq!(
|
||||
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
|
||||
Some(Bytes::from("test"))
|
||||
);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_string() {
|
||||
let b = "test".to_owned();
|
||||
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
|
||||
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
|
||||
assert_eq!(Body::from(&b).size(), BodySize::Sized(4));
|
||||
assert_eq!(Body::from(&b).get_ref(), b"test");
|
||||
pin_mut!(b);
|
||||
|
||||
assert_eq!(b.size(), BodySize::Sized(4));
|
||||
assert_eq!(
|
||||
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
|
||||
Some(Bytes::from("test"))
|
||||
);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_unit() {
|
||||
assert_eq!(().size(), BodySize::Empty);
|
||||
assert!(poll_fn(|cx| Pin::new(&mut ()).poll_next(cx))
|
||||
.await
|
||||
.is_none());
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_box() {
|
||||
let val = Box::new(());
|
||||
pin_mut!(val);
|
||||
assert_eq!(val.size(), BodySize::Empty);
|
||||
assert!(poll_fn(|cx| val.as_mut().poll_next(cx)).await.is_none());
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_body_eq() {
|
||||
assert!(
|
||||
Body::Bytes(Bytes::from_static(b"1"))
|
||||
== Body::Bytes(Bytes::from_static(b"1"))
|
||||
);
|
||||
assert!(Body::Bytes(Bytes::from_static(b"1")) != Body::None);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_body_debug() {
|
||||
assert!(format!("{:?}", Body::None).contains("Body::None"));
|
||||
assert!(format!("{:?}", Body::Empty).contains("Body::Empty"));
|
||||
assert!(format!("{:?}", Body::Bytes(Bytes::from_static(b"1"))).contains('1'));
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_serde_json() {
|
||||
use serde_json::json;
|
||||
assert_eq!(
|
||||
Body::from(serde_json::Value::String("test".into())).size(),
|
||||
BodySize::Sized(6)
|
||||
);
|
||||
assert_eq!(
|
||||
Body::from(json!({"test-key":"test-value"})).size(),
|
||||
BodySize::Sized(25)
|
||||
);
|
||||
}
|
||||
|
||||
mod body_stream {
|
||||
use super::*;
|
||||
//use futures::task::noop_waker;
|
||||
//use futures::stream::once;
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn skips_empty_chunks() {
|
||||
let body = BodyStream::new(stream::iter(
|
||||
["1", "", "2"]
|
||||
.iter()
|
||||
.map(|&v| Ok(Bytes::from(v)) as Result<Bytes, ()>),
|
||||
));
|
||||
pin_mut!(body);
|
||||
|
||||
assert_eq!(
|
||||
poll_fn(|cx| body.as_mut().poll_next(cx))
|
||||
.await
|
||||
.unwrap()
|
||||
.ok(),
|
||||
Some(Bytes::from("1")),
|
||||
);
|
||||
assert_eq!(
|
||||
poll_fn(|cx| body.as_mut().poll_next(cx))
|
||||
.await
|
||||
.unwrap()
|
||||
.ok(),
|
||||
Some(Bytes::from("2")),
|
||||
);
|
||||
}
|
||||
|
||||
/* Now it does not compile as it should
|
||||
#[actix_rt::test]
|
||||
async fn move_pinned_pointer() {
|
||||
let (sender, receiver) = futures::channel::oneshot::channel();
|
||||
let mut body_stream = Ok(BodyStream::new(once(async {
|
||||
let x = Box::new(0i32);
|
||||
let y = &x;
|
||||
receiver.await.unwrap();
|
||||
let _z = **y;
|
||||
Ok::<_, ()>(Bytes::new())
|
||||
})));
|
||||
|
||||
let waker = noop_waker();
|
||||
let mut context = Context::from_waker(&waker);
|
||||
pin_mut!(body_stream);
|
||||
|
||||
let _ = body_stream.as_mut().unwrap().poll_next(&mut context);
|
||||
sender.send(()).unwrap();
|
||||
let _ = std::mem::replace(&mut body_stream, Err([0; 32])).unwrap().poll_next(&mut context);
|
||||
}*/
|
||||
}
|
||||
|
||||
mod sized_stream {
|
||||
use super::*;
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn skips_empty_chunks() {
|
||||
let body = SizedStream::new(
|
||||
2,
|
||||
stream::iter(["1", "", "2"].iter().map(|&v| Ok(Bytes::from(v)))),
|
||||
);
|
||||
pin_mut!(body);
|
||||
assert_eq!(
|
||||
poll_fn(|cx| body.as_mut().poll_next(cx))
|
||||
.await
|
||||
.unwrap()
|
||||
.ok(),
|
||||
Some(Bytes::from("1")),
|
||||
);
|
||||
assert_eq!(
|
||||
poll_fn(|cx| body.as_mut().poll_next(cx))
|
||||
.await
|
||||
.unwrap()
|
||||
.ok(),
|
||||
Some(Bytes::from("2")),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_body_casting() {
|
||||
let mut body = String::from("hello cast");
|
||||
let resp_body: &mut dyn MessageBody = &mut body;
|
||||
let body = resp_body.downcast_ref::<String>().unwrap();
|
||||
assert_eq!(body, "hello cast");
|
||||
let body = &mut resp_body.downcast_mut::<String>().unwrap();
|
||||
body.push('!');
|
||||
let body = resp_body.downcast_ref::<String>().unwrap();
|
||||
assert_eq!(body, "hello cast!");
|
||||
let not_body = resp_body.downcast_ref::<()>();
|
||||
assert!(not_body.is_none());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,35 +1,45 @@
|
||||
use std::fmt;
|
||||
use std::marker::PhantomData;
|
||||
use std::rc::Rc;
|
||||
use std::{fmt, net};
|
||||
|
||||
use actix_server_config::ServerConfig as SrvConfig;
|
||||
use actix_service::{IntoNewService, NewService, Service};
|
||||
use actix_codec::Framed;
|
||||
use actix_service::{IntoServiceFactory, Service, ServiceFactory};
|
||||
|
||||
use crate::body::MessageBody;
|
||||
use crate::config::{KeepAlive, ServiceConfig};
|
||||
use crate::error::Error;
|
||||
use crate::h1::{ExpectHandler, H1Service};
|
||||
use crate::h1::{Codec, ExpectHandler, H1Service, UpgradeHandler};
|
||||
use crate::h2::H2Service;
|
||||
use crate::helpers::{Data, DataFactory};
|
||||
use crate::request::Request;
|
||||
use crate::response::Response;
|
||||
use crate::service::HttpService;
|
||||
use crate::{ConnectCallback, Extensions};
|
||||
|
||||
/// A http service builder
|
||||
/// A HTTP service builder
|
||||
///
|
||||
/// This type can be used to construct an instance of `http service` through a
|
||||
/// This type can be used to construct an instance of [`HttpService`] through a
|
||||
/// builder-like pattern.
|
||||
pub struct HttpServiceBuilder<T, S, X = ExpectHandler> {
|
||||
pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler<T>> {
|
||||
keep_alive: KeepAlive,
|
||||
client_timeout: u64,
|
||||
client_disconnect: u64,
|
||||
secure: bool,
|
||||
local_addr: Option<net::SocketAddr>,
|
||||
expect: X,
|
||||
upgrade: Option<U>,
|
||||
// DEPRECATED: in favor of on_connect_ext
|
||||
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
|
||||
on_connect_ext: Option<Rc<ConnectCallback<T>>>,
|
||||
_t: PhantomData<(T, S)>,
|
||||
}
|
||||
|
||||
impl<T, S> HttpServiceBuilder<T, S, ExpectHandler>
|
||||
impl<T, S> HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler<T>>
|
||||
where
|
||||
S: NewService<SrvConfig, Request = Request>,
|
||||
S::Error: Into<Error>,
|
||||
S: ServiceFactory<Config = (), Request = Request>,
|
||||
S::Error: Into<Error> + 'static,
|
||||
S::InitError: fmt::Debug,
|
||||
<S::Service as Service>::Future: 'static,
|
||||
{
|
||||
/// Create instance of `ServiceConfigBuilder`
|
||||
pub fn new() -> Self {
|
||||
@@ -37,29 +47,52 @@ where
|
||||
keep_alive: KeepAlive::Timeout(5),
|
||||
client_timeout: 5000,
|
||||
client_disconnect: 0,
|
||||
secure: false,
|
||||
local_addr: None,
|
||||
expect: ExpectHandler,
|
||||
upgrade: None,
|
||||
on_connect: None,
|
||||
on_connect_ext: None,
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S, X> HttpServiceBuilder<T, S, X>
|
||||
impl<T, S, X, U> HttpServiceBuilder<T, S, X, U>
|
||||
where
|
||||
S: NewService<SrvConfig, Request = Request>,
|
||||
S::Error: Into<Error>,
|
||||
S: ServiceFactory<Config = (), Request = Request>,
|
||||
S::Error: Into<Error> + 'static,
|
||||
S::InitError: fmt::Debug,
|
||||
X: NewService<Request = Request, Response = Request>,
|
||||
<S::Service as Service>::Future: 'static,
|
||||
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
|
||||
X::Error: Into<Error>,
|
||||
X::InitError: fmt::Debug,
|
||||
<X::Service as Service>::Future: 'static,
|
||||
U: ServiceFactory<Config = (), Request = (Request, Framed<T, Codec>), Response = ()>,
|
||||
U::Error: fmt::Display,
|
||||
U::InitError: fmt::Debug,
|
||||
<U::Service as Service>::Future: 'static,
|
||||
{
|
||||
/// Set server keep-alive setting.
|
||||
///
|
||||
/// By default keep alive is set to a 5 seconds.
|
||||
pub fn keep_alive<U: Into<KeepAlive>>(mut self, val: U) -> Self {
|
||||
pub fn keep_alive<W: Into<KeepAlive>>(mut self, val: W) -> Self {
|
||||
self.keep_alive = val.into();
|
||||
self
|
||||
}
|
||||
|
||||
/// Set connection secure state
|
||||
pub fn secure(mut self) -> Self {
|
||||
self.secure = true;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the local address that this service is bound to.
|
||||
pub fn local_addr(mut self, addr: net::SocketAddr) -> Self {
|
||||
self.local_addr = Some(addr);
|
||||
self
|
||||
}
|
||||
|
||||
/// Set server client timeout in milliseconds for first request.
|
||||
///
|
||||
/// Defines a timeout for reading client request header. If a client does not transmit
|
||||
@@ -87,30 +120,95 @@ where
|
||||
self
|
||||
}
|
||||
|
||||
// #[cfg(feature = "ssl")]
|
||||
// /// Configure alpn protocols for SslAcceptorBuilder.
|
||||
// pub fn configure_openssl(
|
||||
// builder: &mut openssl::ssl::SslAcceptorBuilder,
|
||||
// ) -> io::Result<()> {
|
||||
// let protos: &[u8] = b"\x02h2";
|
||||
// builder.set_alpn_select_callback(|_, protos| {
|
||||
// const H2: &[u8] = b"\x02h2";
|
||||
// if protos.windows(3).any(|window| window == H2) {
|
||||
// Ok(b"h2")
|
||||
// } else {
|
||||
// Err(openssl::ssl::AlpnError::NOACK)
|
||||
// }
|
||||
// });
|
||||
// builder.set_alpn_protos(&protos)?;
|
||||
|
||||
// Ok(())
|
||||
// }
|
||||
|
||||
/// Finish service configuration and create *http service* for HTTP/1 protocol.
|
||||
pub fn h1<F, P, B>(self, service: F) -> H1Service<T, P, S, B, X>
|
||||
/// Provide service for `EXPECT: 100-Continue` support.
|
||||
///
|
||||
/// Service get called with request that contains `EXPECT` header.
|
||||
/// Service must return request in case of success, in that case
|
||||
/// request will be forwarded to main service.
|
||||
pub fn expect<F, X1>(self, expect: F) -> HttpServiceBuilder<T, S, X1, U>
|
||||
where
|
||||
B: MessageBody + 'static,
|
||||
F: IntoNewService<S, SrvConfig>,
|
||||
F: IntoServiceFactory<X1>,
|
||||
X1: ServiceFactory<Config = (), Request = Request, Response = Request>,
|
||||
X1::Error: Into<Error>,
|
||||
X1::InitError: fmt::Debug,
|
||||
<X1::Service as Service>::Future: 'static,
|
||||
{
|
||||
HttpServiceBuilder {
|
||||
keep_alive: self.keep_alive,
|
||||
client_timeout: self.client_timeout,
|
||||
client_disconnect: self.client_disconnect,
|
||||
secure: self.secure,
|
||||
local_addr: self.local_addr,
|
||||
expect: expect.into_factory(),
|
||||
upgrade: self.upgrade,
|
||||
on_connect: self.on_connect,
|
||||
on_connect_ext: self.on_connect_ext,
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
/// Provide service for custom `Connection: UPGRADE` support.
|
||||
///
|
||||
/// If service is provided then normal requests handling get halted
|
||||
/// and this service get called with original request and framed object.
|
||||
pub fn upgrade<F, U1>(self, upgrade: F) -> HttpServiceBuilder<T, S, X, U1>
|
||||
where
|
||||
F: IntoServiceFactory<U1>,
|
||||
U1: ServiceFactory<
|
||||
Config = (),
|
||||
Request = (Request, Framed<T, Codec>),
|
||||
Response = (),
|
||||
>,
|
||||
U1::Error: fmt::Display,
|
||||
U1::InitError: fmt::Debug,
|
||||
<U1::Service as Service>::Future: 'static,
|
||||
{
|
||||
HttpServiceBuilder {
|
||||
keep_alive: self.keep_alive,
|
||||
client_timeout: self.client_timeout,
|
||||
client_disconnect: self.client_disconnect,
|
||||
secure: self.secure,
|
||||
local_addr: self.local_addr,
|
||||
expect: self.expect,
|
||||
upgrade: Some(upgrade.into_factory()),
|
||||
on_connect: self.on_connect,
|
||||
on_connect_ext: self.on_connect_ext,
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set on-connect callback.
|
||||
///
|
||||
/// Called once per connection. Return value of the call is stored in request extensions.
|
||||
///
|
||||
/// *SOFT DEPRECATED*: Prefer the `on_connect_ext` style callback.
|
||||
pub fn on_connect<F, I>(mut self, f: F) -> Self
|
||||
where
|
||||
F: Fn(&T) -> I + 'static,
|
||||
I: Clone + 'static,
|
||||
{
|
||||
self.on_connect = Some(Rc::new(move |io| Box::new(Data(f(io)))));
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets the callback to be run on connection establishment.
|
||||
///
|
||||
/// Has mutable access to a data container that will be merged into request extensions.
|
||||
/// This enables transport layer data (like client certificates) to be accessed in middleware
|
||||
/// and handlers.
|
||||
pub fn on_connect_ext<F>(mut self, f: F) -> Self
|
||||
where
|
||||
F: Fn(&T, &mut Extensions) + 'static,
|
||||
{
|
||||
self.on_connect_ext = Some(Rc::new(f));
|
||||
self
|
||||
}
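A sketch of `on_connect_ext` that stores the peer address for later retrieval from request extensions; the `PeerAddr` wrapper type is a hypothetical name introduced only for illustration:

    use actix_http::{Error, HttpService, Request, Response};
    use actix_rt::net::TcpStream;

    // Hypothetical wrapper type used to carry the peer address into handlers.
    #[derive(Clone, Debug)]
    struct PeerAddr(std::net::SocketAddr);

    let make_service = || {
        HttpService::build()
            // runs once per accepted connection, before any request is read
            .on_connect_ext(|io: &TcpStream, ext| {
                if let Ok(addr) = io.peer_addr() {
                    ext.insert(PeerAddr(addr));
                }
            })
            .finish(|req: Request| async move {
                // the data inserted above is merged into the request extensions
                let peer = req.extensions().get::<PeerAddr>().cloned();
                Ok::<_, Error>(Response::Ok().body(format!("peer: {:?}", peer)))
            })
            .tcp()
    };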
/// Finish service configuration and create an HTTP service for the HTTP/1 protocol.
|
||||
pub fn h1<F, B>(self, service: F) -> H1Service<T, S, B, X, U>
|
||||
where
|
||||
B: MessageBody,
|
||||
F: IntoServiceFactory<S>,
|
||||
S::Error: Into<Error>,
|
||||
S::InitError: fmt::Debug,
|
||||
S::Response: Into<Response<B>>,
|
||||
@@ -119,43 +217,62 @@ where
|
||||
self.keep_alive,
|
||||
self.client_timeout,
|
||||
self.client_disconnect,
|
||||
self.secure,
|
||||
self.local_addr,
|
||||
);
|
||||
H1Service::with_config(cfg, service.into_new_service()).expect(self.expect)
|
||||
|
||||
H1Service::with_config(cfg, service.into_factory())
|
||||
.expect(self.expect)
|
||||
.upgrade(self.upgrade)
|
||||
.on_connect(self.on_connect)
|
||||
.on_connect_ext(self.on_connect_ext)
|
||||
}
|
||||
|
||||
/// Finish service configuration and create *http service* for HTTP/2 protocol.
|
||||
pub fn h2<F, P, B>(self, service: F) -> H2Service<T, P, S, B>
|
||||
/// Finish service configuration and create an HTTP service for the HTTP/2 protocol.
|
||||
pub fn h2<F, B>(self, service: F) -> H2Service<T, S, B>
|
||||
where
|
||||
B: MessageBody + 'static,
|
||||
F: IntoNewService<S, SrvConfig>,
|
||||
S::Error: Into<Error>,
|
||||
F: IntoServiceFactory<S>,
|
||||
S::Error: Into<Error> + 'static,
|
||||
S::InitError: fmt::Debug,
|
||||
S::Response: Into<Response<B>>,
|
||||
S::Response: Into<Response<B>> + 'static,
|
||||
<S::Service as Service>::Future: 'static,
|
||||
{
|
||||
let cfg = ServiceConfig::new(
|
||||
self.keep_alive,
|
||||
self.client_timeout,
|
||||
self.client_disconnect,
|
||||
self.secure,
|
||||
self.local_addr,
|
||||
);
|
||||
H2Service::with_config(cfg, service.into_new_service())
|
||||
|
||||
H2Service::with_config(cfg, service.into_factory())
|
||||
.on_connect(self.on_connect)
|
||||
.on_connect_ext(self.on_connect_ext)
|
||||
}
|
||||
|
||||
/// Finish service configuration and create `HttpService` instance.
|
||||
pub fn finish<F, P, B>(self, service: F) -> HttpService<T, P, S, B>
|
||||
pub fn finish<F, B>(self, service: F) -> HttpService<T, S, B, X, U>
|
||||
where
|
||||
B: MessageBody + 'static,
|
||||
F: IntoNewService<S, SrvConfig>,
|
||||
S::Error: Into<Error>,
|
||||
F: IntoServiceFactory<S>,
|
||||
S::Error: Into<Error> + 'static,
|
||||
S::InitError: fmt::Debug,
|
||||
S::Response: Into<Response<B>>,
|
||||
S::Response: Into<Response<B>> + 'static,
|
||||
<S::Service as Service>::Future: 'static,
|
||||
{
|
||||
let cfg = ServiceConfig::new(
|
||||
self.keep_alive,
|
||||
self.client_timeout,
|
||||
self.client_disconnect,
|
||||
self.secure,
|
||||
self.local_addr,
|
||||
);
|
||||
HttpService::with_config(cfg, service.into_new_service())
|
||||
|
||||
HttpService::with_config(cfg, service.into_factory())
|
||||
.expect(self.expect)
|
||||
.upgrade(self.upgrade)
|
||||
.on_connect(self.on_connect)
|
||||
.on_connect_ext(self.on_connect_ext)
|
||||
}
|
||||
}
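Putting the builder together end to end, a minimal hello-world sketch in the style of the crate's own examples; the binding name, address and response body are illustrative:

    use actix_http::{Error, HttpService, Request, Response};
    use actix_server::Server;

    #[actix_rt::main]
    async fn main() -> std::io::Result<()> {
        Server::build()
            .bind("hello-world", "127.0.0.1:8080", || {
                HttpService::build()
                    // `finish` builds the general `HttpService`; the `h1`/`h2`
                    // constructors above pin the service to a single protocol
                    .finish(|_req: Request| async {
                        Ok::<_, Error>(Response::Ok().body("Hello world!"))
                    })
                    .tcp()
            })?
            .run()
            .await
    }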
actix-http/src/client/config.rs (new file, 39 lines)
@@ -0,0 +1,39 @@
use std::time::Duration;

// These values are taken from hyper/src/proto/h2/client.rs
const DEFAULT_H2_CONN_WINDOW: u32 = 1024 * 1024 * 2; // 2mb
const DEFAULT_H2_STREAM_WINDOW: u32 = 1024 * 1024; // 1mb

/// Connector configuration
#[derive(Clone)]
pub(crate) struct ConnectorConfig {
    pub(crate) timeout: Duration,
    pub(crate) conn_lifetime: Duration,
    pub(crate) conn_keep_alive: Duration,
    pub(crate) disconnect_timeout: Option<Duration>,
    pub(crate) limit: usize,
    pub(crate) conn_window_size: u32,
    pub(crate) stream_window_size: u32,
}

impl Default for ConnectorConfig {
    fn default() -> Self {
        Self {
            timeout: Duration::from_secs(1),
            conn_lifetime: Duration::from_secs(75),
            conn_keep_alive: Duration::from_secs(15),
            disconnect_timeout: Some(Duration::from_millis(3000)),
            limit: 100,
            conn_window_size: DEFAULT_H2_CONN_WINDOW,
            stream_window_size: DEFAULT_H2_STREAM_WINDOW,
        }
    }
}

impl ConnectorConfig {
    pub(crate) fn no_disconnect_timeout(&self) -> Self {
        let mut res = self.clone();
        res.disconnect_timeout = None;
        res
    }
}
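These private defaults back the public client `Connector` builder shown later in this diff; a hedged sketch (assuming the `actix_http::client::Connector` entry point) of overriding a few of them:

    use std::time::Duration;
    use actix_http::client::Connector;

    // Each call below writes into the ConnectorConfig fields defined above.
    let connector = Connector::new()
        .timeout(Duration::from_secs(5))          // default: 1s
        .conn_keep_alive(Duration::from_secs(30)) // default: 15s
        .conn_lifetime(Duration::from_secs(120))  // default: 75s
        .limit(25)                                // default: 100
        .finish();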
|
||||
@@ -1,18 +1,21 @@
|
||||
use std::{fmt, io, time};
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use std::{fmt, io, mem, time};
|
||||
|
||||
use actix_codec::{AsyncRead, AsyncWrite, Framed};
|
||||
use bytes::{Buf, Bytes};
|
||||
use futures::future::{err, Either, Future, FutureResult};
|
||||
use futures::Poll;
|
||||
use futures_util::future::{err, Either, FutureExt, LocalBoxFuture, Ready};
|
||||
use h2::client::SendRequest;
|
||||
use pin_project::pin_project;
|
||||
|
||||
use crate::body::MessageBody;
|
||||
use crate::h1::ClientCodec;
|
||||
use crate::message::{RequestHead, ResponseHead};
|
||||
use crate::message::{RequestHeadType, ResponseHead};
|
||||
use crate::payload::Payload;
|
||||
|
||||
use super::error::SendRequestError;
|
||||
use super::pool::Acquired;
|
||||
use super::pool::{Acquired, Protocol};
|
||||
use super::{h1proto, h2proto};
|
||||
|
||||
pub(crate) enum ConnectionType<Io> {
|
||||
@@ -21,31 +24,32 @@ pub(crate) enum ConnectionType<Io> {
|
||||
}
|
||||
|
||||
pub trait Connection {
|
||||
type Io: AsyncRead + AsyncWrite;
|
||||
type Future: Future<Item = (ResponseHead, Payload), Error = SendRequestError>;
|
||||
type Io: AsyncRead + AsyncWrite + Unpin;
|
||||
type Future: Future<Output = Result<(ResponseHead, Payload), SendRequestError>>;
|
||||
|
||||
fn protocol(&self) -> Protocol;
|
||||
|
||||
/// Send request and body
|
||||
fn send_request<B: MessageBody + 'static>(
|
||||
fn send_request<B: MessageBody + 'static, H: Into<RequestHeadType>>(
|
||||
self,
|
||||
head: RequestHead,
|
||||
head: H,
|
||||
body: B,
|
||||
) -> Self::Future;
|
||||
|
||||
type TunnelFuture: Future<
|
||||
Item = (ResponseHead, Framed<Self::Io, ClientCodec>),
|
||||
Error = SendRequestError,
|
||||
Output = Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
|
||||
>;
|
||||
|
||||
/// Send request, returns Response and Framed
|
||||
fn open_tunnel(self, head: RequestHead) -> Self::TunnelFuture;
|
||||
fn open_tunnel<H: Into<RequestHeadType>>(self, head: H) -> Self::TunnelFuture;
|
||||
}
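For orientation, a sketch of how the trait reads at a call site; the helper name `fetch` and the empty `()` body are illustrative assumptions:

    use actix_http::client::{Connection, SendRequestError};
    use actix_http::{Payload, RequestHead, ResponseHead};

    // Generic over any established client connection.
    async fn fetch<C: Connection>(
        conn: C,
        head: RequestHead,
    ) -> Result<(ResponseHead, Payload), SendRequestError> {
        // `()` is an empty MessageBody; a real request could stream bytes here
        conn.send_request(head, ()).await
    }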
pub(crate) trait ConnectionLifetime: AsyncRead + AsyncWrite + 'static {
|
||||
/// Close connection
|
||||
fn close(&mut self);
|
||||
fn close(self: Pin<&mut Self>);
|
||||
|
||||
/// Release connection to the connection pool
|
||||
fn release(&mut self);
|
||||
fn release(self: Pin<&mut Self>);
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
@@ -60,7 +64,7 @@ impl<T> fmt::Debug for IoConnection<T>
|
||||
where
|
||||
T: fmt::Debug,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self.io {
|
||||
Some(ConnectionType::H1(ref io)) => write!(f, "H1Connection({:?})", io),
|
||||
Some(ConnectionType::H2(_)) => write!(f, "H2Connection"),
|
||||
@@ -69,7 +73,7 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: AsyncRead + AsyncWrite> IoConnection<T> {
|
||||
impl<T: AsyncRead + AsyncWrite + Unpin> IoConnection<T> {
|
||||
pub(crate) fn new(
|
||||
io: ConnectionType<T>,
|
||||
created: time::Instant,
|
||||
@@ -89,49 +93,50 @@ impl<T: AsyncRead + AsyncWrite> IoConnection<T> {
|
||||
|
||||
impl<T> Connection for IoConnection<T>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite + 'static,
|
||||
T: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
type Io = T;
|
||||
type Future = Box<Future<Item = (ResponseHead, Payload), Error = SendRequestError>>;
|
||||
type Future =
|
||||
LocalBoxFuture<'static, Result<(ResponseHead, Payload), SendRequestError>>;
|
||||
|
||||
fn send_request<B: MessageBody + 'static>(
|
||||
fn protocol(&self) -> Protocol {
|
||||
match self.io {
|
||||
Some(ConnectionType::H1(_)) => Protocol::Http1,
|
||||
Some(ConnectionType::H2(_)) => Protocol::Http2,
|
||||
None => Protocol::Http1,
|
||||
}
|
||||
}
|
||||
|
||||
fn send_request<B: MessageBody + 'static, H: Into<RequestHeadType>>(
|
||||
mut self,
|
||||
head: RequestHead,
|
||||
head: H,
|
||||
body: B,
|
||||
) -> Self::Future {
|
||||
match self.io.take().unwrap() {
|
||||
ConnectionType::H1(io) => Box::new(h1proto::send_request(
|
||||
io,
|
||||
head,
|
||||
body,
|
||||
self.created,
|
||||
self.pool,
|
||||
)),
|
||||
ConnectionType::H2(io) => Box::new(h2proto::send_request(
|
||||
io,
|
||||
head,
|
||||
body,
|
||||
self.created,
|
||||
self.pool,
|
||||
)),
|
||||
ConnectionType::H1(io) => {
|
||||
h1proto::send_request(io, head.into(), body, self.created, self.pool)
|
||||
.boxed_local()
|
||||
}
|
||||
ConnectionType::H2(io) => {
|
||||
h2proto::send_request(io, head.into(), body, self.created, self.pool)
|
||||
.boxed_local()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type TunnelFuture = Either<
|
||||
Box<
|
||||
Future<
|
||||
Item = (ResponseHead, Framed<Self::Io, ClientCodec>),
|
||||
Error = SendRequestError,
|
||||
>,
|
||||
LocalBoxFuture<
|
||||
'static,
|
||||
Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
|
||||
>,
|
||||
FutureResult<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
|
||||
Ready<Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>>,
|
||||
>;
|
||||
|
||||
/// Send request, returns Response and Framed
|
||||
fn open_tunnel(mut self, head: RequestHead) -> Self::TunnelFuture {
|
||||
fn open_tunnel<H: Into<RequestHeadType>>(mut self, head: H) -> Self::TunnelFuture {
|
||||
match self.io.take().unwrap() {
|
||||
ConnectionType::H1(io) => {
|
||||
Either::A(Box::new(h1proto::open_tunnel(io, head)))
|
||||
Either::Left(h1proto::open_tunnel(io, head.into()).boxed_local())
|
||||
}
|
||||
ConnectionType::H2(io) => {
|
||||
if let Some(mut pool) = self.pool.take() {
|
||||
@@ -141,7 +146,7 @@ where
|
||||
None,
|
||||
));
|
||||
}
|
||||
Either::B(err(SendRequestError::TunnelNotSupported))
|
||||
Either::Right(err(SendRequestError::TunnelNotSupported))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -155,15 +160,23 @@ pub(crate) enum EitherConnection<A, B> {
|
||||
|
||||
impl<A, B> Connection for EitherConnection<A, B>
|
||||
where
|
||||
A: AsyncRead + AsyncWrite + 'static,
|
||||
B: AsyncRead + AsyncWrite + 'static,
|
||||
A: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
B: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
type Io = EitherIo<A, B>;
|
||||
type Future = Box<Future<Item = (ResponseHead, Payload), Error = SendRequestError>>;
|
||||
type Future =
|
||||
LocalBoxFuture<'static, Result<(ResponseHead, Payload), SendRequestError>>;
|
||||
|
||||
fn send_request<RB: MessageBody + 'static>(
|
||||
fn protocol(&self) -> Protocol {
|
||||
match self {
|
||||
EitherConnection::A(con) => con.protocol(),
|
||||
EitherConnection::B(con) => con.protocol(),
|
||||
}
|
||||
}
|
||||
|
||||
fn send_request<RB: MessageBody + 'static, H: Into<RequestHeadType>>(
|
||||
self,
|
||||
head: RequestHead,
|
||||
head: H,
|
||||
body: RB,
|
||||
) -> Self::Future {
|
||||
match self {
|
||||
@@ -172,44 +185,34 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
type TunnelFuture = Box<
|
||||
Future<
|
||||
Item = (ResponseHead, Framed<Self::Io, ClientCodec>),
|
||||
Error = SendRequestError,
|
||||
>,
|
||||
type TunnelFuture = LocalBoxFuture<
|
||||
'static,
|
||||
Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
|
||||
>;
|
||||
|
||||
/// Send request, returns Response and Framed
|
||||
fn open_tunnel(self, head: RequestHead) -> Self::TunnelFuture {
|
||||
fn open_tunnel<H: Into<RequestHeadType>>(self, head: H) -> Self::TunnelFuture {
|
||||
match self {
|
||||
EitherConnection::A(con) => Box::new(
|
||||
con.open_tunnel(head)
|
||||
.map(|(head, framed)| (head, framed.map_io(EitherIo::A))),
|
||||
),
|
||||
EitherConnection::B(con) => Box::new(
|
||||
con.open_tunnel(head)
|
||||
.map(|(head, framed)| (head, framed.map_io(EitherIo::B))),
|
||||
),
|
||||
EitherConnection::A(con) => con
|
||||
.open_tunnel(head)
|
||||
.map(|res| {
|
||||
res.map(|(head, framed)| (head, framed.into_map_io(EitherIo::A)))
|
||||
})
|
||||
.boxed_local(),
|
||||
EitherConnection::B(con) => con
|
||||
.open_tunnel(head)
|
||||
.map(|res| {
|
||||
res.map(|(head, framed)| (head, framed.into_map_io(EitherIo::B)))
|
||||
})
|
||||
.boxed_local(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[pin_project(project = EitherIoProj)]
|
||||
pub enum EitherIo<A, B> {
|
||||
A(A),
|
||||
B(B),
|
||||
}
|
||||
|
||||
impl<A, B> io::Read for EitherIo<A, B>
|
||||
where
|
||||
A: io::Read,
|
||||
B: io::Read,
|
||||
{
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
match self {
|
||||
EitherIo::A(ref mut val) => val.read(buf),
|
||||
EitherIo::B(ref mut val) => val.read(buf),
|
||||
}
|
||||
}
|
||||
A(#[pin] A),
|
||||
B(#[pin] B),
|
||||
}
|
||||
|
||||
impl<A, B> AsyncRead for EitherIo<A, B>
|
||||
@@ -217,7 +220,21 @@ where
|
||||
A: AsyncRead,
|
||||
B: AsyncRead,
|
||||
{
|
||||
unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool {
|
||||
fn poll_read(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &mut [u8],
|
||||
) -> Poll<io::Result<usize>> {
|
||||
match self.project() {
|
||||
EitherIoProj::A(val) => val.poll_read(cx, buf),
|
||||
EitherIoProj::B(val) => val.poll_read(cx, buf),
|
||||
}
|
||||
}
|
||||
|
||||
unsafe fn prepare_uninitialized_buffer(
|
||||
&self,
|
||||
buf: &mut [mem::MaybeUninit<u8>],
|
||||
) -> bool {
|
||||
match self {
|
||||
EitherIo::A(ref val) => val.prepare_uninitialized_buffer(buf),
|
||||
EitherIo::B(ref val) => val.prepare_uninitialized_buffer(buf),
|
||||
@@ -225,45 +242,50 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
impl<A, B> io::Write for EitherIo<A, B>
|
||||
where
|
||||
A: io::Write,
|
||||
B: io::Write,
|
||||
{
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
match self {
|
||||
EitherIo::A(ref mut val) => val.write(buf),
|
||||
EitherIo::B(ref mut val) => val.write(buf),
|
||||
}
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
match self {
|
||||
EitherIo::A(ref mut val) => val.flush(),
|
||||
EitherIo::B(ref mut val) => val.flush(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<A, B> AsyncWrite for EitherIo<A, B>
|
||||
where
|
||||
A: AsyncWrite,
|
||||
B: AsyncWrite,
|
||||
{
|
||||
fn shutdown(&mut self) -> Poll<(), io::Error> {
|
||||
match self {
|
||||
EitherIo::A(ref mut val) => val.shutdown(),
|
||||
EitherIo::B(ref mut val) => val.shutdown(),
|
||||
fn poll_write(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &[u8],
|
||||
) -> Poll<io::Result<usize>> {
|
||||
match self.project() {
|
||||
EitherIoProj::A(val) => val.poll_write(cx, buf),
|
||||
EitherIoProj::B(val) => val.poll_write(cx, buf),
|
||||
}
|
||||
}
|
||||
|
||||
fn write_buf<U: Buf>(&mut self, buf: &mut U) -> Poll<usize, io::Error>
|
||||
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||
match self.project() {
|
||||
EitherIoProj::A(val) => val.poll_flush(cx),
|
||||
EitherIoProj::B(val) => val.poll_flush(cx),
|
||||
}
|
||||
}
|
||||
|
||||
fn poll_shutdown(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<io::Result<()>> {
|
||||
match self.project() {
|
||||
EitherIoProj::A(val) => val.poll_shutdown(cx),
|
||||
EitherIoProj::B(val) => val.poll_shutdown(cx),
|
||||
}
|
||||
}
|
||||
|
||||
fn poll_write_buf<U: Buf>(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &mut U,
|
||||
) -> Poll<Result<usize, io::Error>>
|
||||
where
|
||||
Self: Sized,
|
||||
{
|
||||
match self {
|
||||
EitherIo::A(ref mut val) => val.write_buf(buf),
|
||||
EitherIo::B(ref mut val) => val.write_buf(buf),
|
||||
match self.project() {
|
||||
EitherIoProj::A(val) => val.poll_write_buf(cx, buf),
|
||||
EitherIoProj::B(val) => val.poll_write_buf(cx, buf),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,19 +6,33 @@ use actix_codec::{AsyncRead, AsyncWrite};
|
||||
use actix_connect::{
|
||||
default_connector, Connect as TcpConnect, Connection as TcpConnection,
|
||||
};
|
||||
use actix_service::{apply_fn, Service, ServiceExt};
|
||||
use actix_rt::net::TcpStream;
|
||||
use actix_service::{apply_fn, Service};
|
||||
use actix_utils::timeout::{TimeoutError, TimeoutService};
|
||||
use http::Uri;
|
||||
use tokio_tcp::TcpStream;
|
||||
|
||||
use super::config::ConnectorConfig;
|
||||
use super::connection::Connection;
|
||||
use super::error::ConnectError;
|
||||
use super::pool::{ConnectionPool, Protocol};
|
||||
use super::Connect;
|
||||
|
||||
#[cfg(feature = "ssl")]
|
||||
use openssl::ssl::SslConnector;
|
||||
#[cfg(feature = "openssl")]
|
||||
use actix_connect::ssl::openssl::SslConnector as OpensslConnector;
|
||||
|
||||
#[cfg(not(feature = "ssl"))]
|
||||
#[cfg(feature = "rustls")]
|
||||
use actix_connect::ssl::rustls::ClientConfig;
|
||||
#[cfg(feature = "rustls")]
|
||||
use std::sync::Arc;
|
||||
|
||||
#[cfg(any(feature = "openssl", feature = "rustls"))]
|
||||
enum SslConnector {
|
||||
#[cfg(feature = "openssl")]
|
||||
Openssl(OpensslConnector),
|
||||
#[cfg(feature = "rustls")]
|
||||
Rustls(Arc<ClientConfig>),
|
||||
}
|
||||
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
|
||||
type SslConnector = ();
|
||||
|
||||
/// Manages http client network connectivity
|
||||
@@ -35,17 +49,17 @@ type SslConnector = ();
|
||||
/// ```
|
||||
pub struct Connector<T, U> {
|
||||
connector: T,
|
||||
timeout: Duration,
|
||||
conn_lifetime: Duration,
|
||||
conn_keep_alive: Duration,
|
||||
disconnect_timeout: Duration,
|
||||
limit: usize,
|
||||
config: ConnectorConfig,
|
||||
#[allow(dead_code)]
|
||||
ssl: SslConnector,
|
||||
_t: PhantomData<U>,
|
||||
}
|
||||
|
||||
trait Io: AsyncRead + AsyncWrite + Unpin {}
|
||||
impl<T: AsyncRead + AsyncWrite + Unpin> Io for T {}
|
||||
|
||||
impl Connector<(), ()> {
|
||||
#[allow(clippy::new_ret_no_self, clippy::let_unit_value)]
|
||||
pub fn new() -> Connector<
|
||||
impl Service<
|
||||
Request = TcpConnect<Uri>,
|
||||
@@ -54,40 +68,54 @@ impl Connector<(), ()> {
|
||||
> + Clone,
|
||||
TcpStream,
|
||||
> {
|
||||
let ssl = {
|
||||
#[cfg(feature = "ssl")]
|
||||
{
|
||||
use log::error;
|
||||
use openssl::ssl::{SslConnector, SslMethod};
|
||||
|
||||
let mut ssl = SslConnector::builder(SslMethod::tls()).unwrap();
|
||||
let _ = ssl
|
||||
.set_alpn_protos(b"\x02h2\x08http/1.1")
|
||||
.map_err(|e| error!("Can not set alpn protocol: {:?}", e));
|
||||
ssl.build()
|
||||
}
|
||||
#[cfg(not(feature = "ssl"))]
|
||||
{}
|
||||
};
|
||||
|
||||
Connector {
|
||||
ssl,
|
||||
ssl: Self::build_ssl(vec![b"h2".to_vec(), b"http/1.1".to_vec()]),
|
||||
connector: default_connector(),
|
||||
timeout: Duration::from_secs(1),
|
||||
conn_lifetime: Duration::from_secs(75),
|
||||
conn_keep_alive: Duration::from_secs(15),
|
||||
disconnect_timeout: Duration::from_millis(3000),
|
||||
limit: 100,
|
||||
config: ConnectorConfig::default(),
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
// Build Ssl connector with openssl, based on supplied alpn protocols
|
||||
#[cfg(feature = "openssl")]
|
||||
fn build_ssl(protocols: Vec<Vec<u8>>) -> SslConnector {
|
||||
use actix_connect::ssl::openssl::SslMethod;
|
||||
use bytes::{BufMut, BytesMut};
|
||||
|
||||
let mut alpn = BytesMut::with_capacity(20);
|
||||
for proto in protocols.iter() {
|
||||
alpn.put_u8(proto.len() as u8);
|
||||
alpn.put(proto.as_slice());
|
||||
}
|
||||
|
||||
let mut ssl = OpensslConnector::builder(SslMethod::tls()).unwrap();
|
||||
let _ = ssl
|
||||
.set_alpn_protos(&alpn)
|
||||
.map_err(|e| error!("Can not set alpn protocol: {:?}", e));
|
||||
SslConnector::Openssl(ssl.build())
|
||||
}
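The `BytesMut` loop above assembles the length-prefixed ALPN protocol list; a stand-alone illustration of that wire format (the helper name is hypothetical):

    // Each protocol name is preceded by its length as a single byte.
    fn alpn_wire_format(protocols: &[&[u8]]) -> Vec<u8> {
        let mut alpn = Vec::new();
        for proto in protocols {
            alpn.push(proto.len() as u8);
            alpn.extend_from_slice(proto);
        }
        alpn
    }

    fn main() {
        assert_eq!(
            alpn_wire_format(&[&b"h2"[..], &b"http/1.1"[..]]),
            b"\x02h2\x08http/1.1".to_vec()
        );
    }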
// Build Ssl connector with rustls, based on supplied alpn protocols
|
||||
#[cfg(all(not(feature = "openssl"), feature = "rustls"))]
|
||||
fn build_ssl(protocols: Vec<Vec<u8>>) -> SslConnector {
|
||||
let mut config = ClientConfig::new();
|
||||
config.set_protocols(&protocols);
|
||||
config
|
||||
.root_store
|
||||
.add_server_trust_anchors(&actix_tls::rustls::TLS_SERVER_ROOTS);
|
||||
SslConnector::Rustls(Arc::new(config))
|
||||
}
|
||||
|
||||
// ssl turned off, provides empty ssl connector
|
||||
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
|
||||
fn build_ssl(_: Vec<Vec<u8>>) -> SslConnector {}
|
||||
}
|
||||
|
||||
impl<T, U> Connector<T, U> {
|
||||
/// Use custom connector.
|
||||
pub fn connector<T1, U1>(self, connector: T1) -> Connector<T1, U1>
|
||||
where
|
||||
U1: AsyncRead + AsyncWrite + fmt::Debug,
|
||||
U1: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
|
||||
T1: Service<
|
||||
Request = TcpConnect<Uri>,
|
||||
Response = TcpConnection<Uri, U1>,
|
||||
@@ -96,11 +124,7 @@ impl<T, U> Connector<T, U> {
|
||||
{
|
||||
Connector {
|
||||
connector,
|
||||
timeout: self.timeout,
|
||||
conn_lifetime: self.conn_lifetime,
|
||||
conn_keep_alive: self.conn_keep_alive,
|
||||
disconnect_timeout: self.disconnect_timeout,
|
||||
limit: self.limit,
|
||||
config: self.config,
|
||||
ssl: self.ssl,
|
||||
_t: PhantomData,
|
||||
}
|
||||
@@ -109,24 +133,63 @@ impl<T, U> Connector<T, U> {
|
||||
|
||||
impl<T, U> Connector<T, U>
|
||||
where
|
||||
U: AsyncRead + AsyncWrite + fmt::Debug + 'static,
|
||||
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
|
||||
T: Service<
|
||||
Request = TcpConnect<Uri>,
|
||||
Response = TcpConnection<Uri, U>,
|
||||
Error = actix_connect::ConnectError,
|
||||
> + Clone,
|
||||
> + Clone
|
||||
+ 'static,
|
||||
{
|
||||
/// Connection timeout, i.e. max time to connect to remote host including dns name resolution.
|
||||
/// Set to 1 second by default.
|
||||
pub fn timeout(mut self, timeout: Duration) -> Self {
|
||||
self.timeout = timeout;
|
||||
self.config.timeout = timeout;
|
||||
self
|
||||
}
|
||||
|
||||
#[cfg(feature = "ssl")]
|
||||
#[cfg(feature = "openssl")]
|
||||
/// Use custom `SslConnector` instance.
|
||||
pub fn ssl(mut self, connector: SslConnector) -> Self {
|
||||
self.ssl = connector;
|
||||
pub fn ssl(mut self, connector: OpensslConnector) -> Self {
|
||||
self.ssl = SslConnector::Openssl(connector);
|
||||
self
|
||||
}
|
||||
|
||||
#[cfg(feature = "rustls")]
|
||||
pub fn rustls(mut self, connector: Arc<ClientConfig>) -> Self {
|
||||
self.ssl = SslConnector::Rustls(connector);
|
||||
self
|
||||
}
|
||||
|
||||
/// Maximum supported HTTP major version.
/// Supported versions: HTTP/1.1 and HTTP/2.
|
||||
pub fn max_http_version(mut self, val: http::Version) -> Self {
|
||||
let versions = match val {
|
||||
http::Version::HTTP_11 => vec![b"http/1.1".to_vec()],
|
||||
http::Version::HTTP_2 => vec![b"h2".to_vec(), b"http/1.1".to_vec()],
|
||||
_ => {
|
||||
unimplemented!("actix-http:client: supported versions http/1.1, http/2")
|
||||
}
|
||||
};
|
||||
self.ssl = Connector::build_ssl(versions);
|
||||
self
|
||||
}
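A usage sketch of the version cap above, restricting ALPN to `http/1.1` only; the variable name is illustrative:

    use actix_http::client::Connector;
    use actix_http::http::Version;

    // With HTTP_11 the connector only advertises "http/1.1" during the
    // TLS handshake; HTTP_2 would advertise both "h2" and "http/1.1".
    let h1_only_connector = Connector::new()
        .max_http_version(Version::HTTP_11)
        .finish();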
/// Indicates the initial window size (in octets) for
|
||||
/// HTTP2 stream-level flow control for received data.
|
||||
///
|
||||
/// The default value is 65,535 and is good for APIs, but not for big objects.
|
||||
pub fn initial_window_size(mut self, size: u32) -> Self {
|
||||
self.config.stream_window_size = size;
|
||||
self
|
||||
}
|
||||
|
||||
/// Indicates the initial window size (in octets) for
|
||||
/// HTTP2 connection-level flow control for received data.
|
||||
///
|
||||
/// The default value is 65,535 and is good for APIs, but not for big objects.
|
||||
pub fn initial_connection_window_size(mut self, size: u32) -> Self {
|
||||
self.config.conn_window_size = size;
|
||||
self
|
||||
}
|
||||
|
||||
@@ -135,7 +198,7 @@ where
|
||||
/// If limit is 0, the connector has no limit.
|
||||
/// The default limit size is 100.
|
||||
pub fn limit(mut self, limit: usize) -> Self {
|
||||
self.limit = limit;
|
||||
self.config.limit = limit;
|
||||
self
|
||||
}
|
||||
|
||||
@@ -146,7 +209,7 @@ where
|
||||
/// exceeds this period, the connection is closed.
|
||||
/// Default keep-alive period is 15 seconds.
|
||||
pub fn conn_keep_alive(mut self, dur: Duration) -> Self {
|
||||
self.conn_keep_alive = dur;
|
||||
self.config.conn_keep_alive = dur;
|
||||
self
|
||||
}
|
||||
|
||||
@@ -156,7 +219,7 @@ where
|
||||
/// until it is closed regardless of keep-alive period.
|
||||
/// Default lifetime period is 75 seconds.
|
||||
pub fn conn_lifetime(mut self, dur: Duration) -> Self {
|
||||
self.conn_lifetime = dur;
|
||||
self.config.conn_lifetime = dur;
|
||||
self
|
||||
}
|
||||
|
||||
@@ -169,7 +232,7 @@ where
|
||||
///
|
||||
/// By default disconnect timeout is set to 3000 milliseconds.
|
||||
pub fn disconnect_timeout(mut self, dur: Duration) -> Self {
|
||||
self.disconnect_timeout = dur;
|
||||
self.config.disconnect_timeout = Some(dur);
|
||||
self
|
||||
}
|
||||
|
||||
@@ -178,15 +241,17 @@ where
|
||||
/// its combinator chain.
|
||||
pub fn finish(
|
||||
self,
|
||||
) -> impl Service<Request = Uri, Response = impl Connection, Error = ConnectError> + Clone
|
||||
{
|
||||
#[cfg(not(feature = "ssl"))]
|
||||
) -> impl Service<Request = Connect, Response = impl Connection, Error = ConnectError>
|
||||
+ Clone {
|
||||
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
|
||||
{
|
||||
let connector = TimeoutService::new(
|
||||
self.timeout,
|
||||
apply_fn(self.connector, |msg: Uri, srv| srv.call(msg.into()))
|
||||
.map_err(ConnectError::from)
|
||||
.map(|stream| (stream.into_parts().0, Protocol::Http1)),
|
||||
self.config.timeout,
|
||||
apply_fn(self.connector, |msg: Connect, srv| {
|
||||
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
|
||||
})
|
||||
.map_err(ConnectError::from)
|
||||
.map(|stream| (stream.into_parts().0, Protocol::Http1)),
|
||||
)
|
||||
.map_err(|e| match e {
|
||||
TimeoutError::Service(e) => e,
|
||||
@@ -196,40 +261,66 @@ where
|
||||
connect_impl::InnerConnector {
|
||||
tcp_pool: ConnectionPool::new(
|
||||
connector,
|
||||
self.conn_lifetime,
|
||||
self.conn_keep_alive,
|
||||
None,
|
||||
self.limit,
|
||||
self.config.no_disconnect_timeout(),
|
||||
),
|
||||
}
|
||||
}
|
||||
#[cfg(feature = "ssl")]
|
||||
#[cfg(any(feature = "openssl", feature = "rustls"))]
|
||||
{
|
||||
const H2: &[u8] = b"h2";
|
||||
use actix_connect::ssl::OpensslConnector;
|
||||
#[cfg(feature = "openssl")]
|
||||
use actix_connect::ssl::openssl::OpensslConnector;
|
||||
#[cfg(feature = "rustls")]
|
||||
use actix_connect::ssl::rustls::{RustlsConnector, Session};
|
||||
use actix_service::{boxed::service, pipeline};
|
||||
|
||||
let ssl_service = TimeoutService::new(
|
||||
self.timeout,
|
||||
apply_fn(self.connector.clone(), |msg: Uri, srv| srv.call(msg.into()))
|
||||
.map_err(ConnectError::from)
|
||||
.and_then(
|
||||
OpensslConnector::service(self.ssl)
|
||||
.map_err(ConnectError::from)
|
||||
self.config.timeout,
|
||||
pipeline(
|
||||
apply_fn(self.connector.clone(), |msg: Connect, srv| {
|
||||
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
|
||||
})
|
||||
.map_err(ConnectError::from),
|
||||
)
|
||||
.and_then(match self.ssl {
|
||||
#[cfg(feature = "openssl")]
|
||||
SslConnector::Openssl(ssl) => service(
|
||||
OpensslConnector::service(ssl)
|
||||
.map(|stream| {
|
||||
let sock = stream.into_parts().0;
|
||||
let h2 = sock
|
||||
.get_ref()
|
||||
.ssl()
|
||||
.selected_alpn_protocol()
|
||||
.map(|protos| protos.windows(2).any(|w| w == H2))
|
||||
.unwrap_or(false);
|
||||
if h2 {
|
||||
(sock, Protocol::Http2)
|
||||
(Box::new(sock) as Box<dyn Io>, Protocol::Http2)
|
||||
} else {
|
||||
(sock, Protocol::Http1)
|
||||
(Box::new(sock) as Box<dyn Io>, Protocol::Http1)
|
||||
}
|
||||
})
|
||||
.map_err(ConnectError::from),
|
||||
),
|
||||
#[cfg(feature = "rustls")]
|
||||
SslConnector::Rustls(ssl) => service(
|
||||
RustlsConnector::service(ssl)
|
||||
.map_err(ConnectError::from)
|
||||
.map(|stream| {
|
||||
let sock = stream.into_parts().0;
|
||||
let h2 = sock
|
||||
.get_ref()
|
||||
.1
|
||||
.get_alpn_protocol()
|
||||
.map(|protos| protos.windows(2).any(|w| w == H2))
|
||||
.unwrap_or(false);
|
||||
if h2 {
|
||||
(Box::new(sock) as Box<dyn Io>, Protocol::Http2)
|
||||
} else {
|
||||
(Box::new(sock) as Box<dyn Io>, Protocol::Http1)
|
||||
}
|
||||
}),
|
||||
),
|
||||
}),
|
||||
)
|
||||
.map_err(|e| match e {
|
||||
TimeoutError::Service(e) => e,
|
||||
@@ -237,10 +328,12 @@ where
|
||||
});
|
||||
|
||||
let tcp_service = TimeoutService::new(
|
||||
self.timeout,
|
||||
apply_fn(self.connector.clone(), |msg: Uri, srv| srv.call(msg.into()))
|
||||
.map_err(ConnectError::from)
|
||||
.map(|stream| (stream.into_parts().0, Protocol::Http1)),
|
||||
self.config.timeout,
|
||||
apply_fn(self.connector, |msg: Connect, srv| {
|
||||
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
|
||||
})
|
||||
.map_err(ConnectError::from)
|
||||
.map(|stream| (stream.into_parts().0, Protocol::Http1)),
|
||||
)
|
||||
.map_err(|e| match e {
|
||||
TimeoutError::Service(e) => e,
|
||||
@@ -250,53 +343,37 @@ where
|
||||
connect_impl::InnerConnector {
|
||||
tcp_pool: ConnectionPool::new(
|
||||
tcp_service,
|
||||
self.conn_lifetime,
|
||||
self.conn_keep_alive,
|
||||
None,
|
||||
self.limit,
|
||||
),
|
||||
ssl_pool: ConnectionPool::new(
|
||||
ssl_service,
|
||||
self.conn_lifetime,
|
||||
self.conn_keep_alive,
|
||||
Some(self.disconnect_timeout),
|
||||
self.limit,
|
||||
self.config.no_disconnect_timeout(),
|
||||
),
|
||||
ssl_pool: ConnectionPool::new(ssl_service, self.config),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "0.1.0-alpha4", note = "please use `.finish()` method")]
|
||||
pub fn service(
|
||||
self,
|
||||
) -> impl Service<Request = Uri, Response = impl Connection, Error = ConnectError> + Clone
|
||||
{
|
||||
self.finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "ssl"))]
|
||||
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
|
||||
mod connect_impl {
|
||||
use futures::future::{err, Either, FutureResult};
|
||||
use futures::Poll;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use futures_util::future::{err, Either, Ready};
|
||||
|
||||
use super::*;
|
||||
use crate::client::connection::IoConnection;
|
||||
|
||||
pub(crate) struct InnerConnector<T, Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + 'static,
|
||||
T: Service<Request = Uri, Response = (Io, Protocol), Error = ConnectError>,
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
{
|
||||
pub(crate) tcp_pool: ConnectionPool<T, Io>,
|
||||
}
|
||||
|
||||
impl<T, Io> Clone for InnerConnector<T, Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + 'static,
|
||||
T: Service<Request = Uri, Response = (Io, Protocol), Error = ConnectError>
|
||||
+ Clone,
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
{
|
||||
fn clone(&self) -> Self {
|
||||
InnerConnector {
|
||||
@@ -307,48 +384,52 @@ mod connect_impl {
|
||||
|
||||
impl<T, Io> Service for InnerConnector<T, Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + 'static,
|
||||
T: Service<Request = Uri, Response = (Io, Protocol), Error = ConnectError>,
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
{
|
||||
type Request = Uri;
|
||||
type Request = Connect;
|
||||
type Response = IoConnection<Io>;
|
||||
type Error = ConnectError;
|
||||
type Future = Either<
|
||||
<ConnectionPool<T, Io> as Service>::Future,
|
||||
FutureResult<IoConnection<Io>, ConnectError>,
|
||||
Ready<Result<IoConnection<Io>, ConnectError>>,
|
||||
>;
|
||||
|
||||
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
|
||||
self.tcp_pool.poll_ready()
|
||||
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
self.tcp_pool.poll_ready(cx)
|
||||
}
|
||||
|
||||
fn call(&mut self, req: Uri) -> Self::Future {
|
||||
match req.scheme_str() {
|
||||
fn call(&mut self, req: Connect) -> Self::Future {
|
||||
match req.uri.scheme_str() {
|
||||
Some("https") | Some("wss") => {
|
||||
Either::B(err(ConnectError::SslIsNotSupported))
|
||||
Either::Right(err(ConnectError::SslIsNotSupported))
|
||||
}
|
||||
_ => Either::A(self.tcp_pool.call(req)),
|
||||
_ => Either::Left(self.tcp_pool.call(req)),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "ssl")]
|
||||
#[cfg(any(feature = "openssl", feature = "rustls"))]
|
||||
mod connect_impl {
|
||||
use std::future::Future;
|
||||
use std::marker::PhantomData;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use futures::future::{Either, FutureResult};
|
||||
use futures::{Async, Future, Poll};
|
||||
use futures_core::ready;
|
||||
use futures_util::future::Either;
|
||||
|
||||
use super::*;
|
||||
use crate::client::connection::EitherConnection;
|
||||
|
||||
pub(crate) struct InnerConnector<T1, T2, Io1, Io2>
|
||||
where
|
||||
Io1: AsyncRead + AsyncWrite + 'static,
|
||||
Io2: AsyncRead + AsyncWrite + 'static,
|
||||
T1: Service<Request = Uri, Response = (Io1, Protocol), Error = ConnectError>,
|
||||
T2: Service<Request = Uri, Response = (Io2, Protocol), Error = ConnectError>,
|
||||
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>,
|
||||
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>,
|
||||
{
|
||||
pub(crate) tcp_pool: ConnectionPool<T1, Io1>,
|
||||
pub(crate) ssl_pool: ConnectionPool<T2, Io2>,
|
||||
@@ -356,12 +437,12 @@ mod connect_impl {
|
||||
|
||||
impl<T1, T2, Io1, Io2> Clone for InnerConnector<T1, T2, Io1, Io2>
|
||||
where
|
||||
Io1: AsyncRead + AsyncWrite + 'static,
|
||||
Io2: AsyncRead + AsyncWrite + 'static,
|
||||
T1: Service<Request = Uri, Response = (Io1, Protocol), Error = ConnectError>
|
||||
+ Clone,
|
||||
T2: Service<Request = Uri, Response = (Io2, Protocol), Error = ConnectError>
|
||||
+ Clone,
|
||||
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
{
|
||||
fn clone(&self) -> Self {
|
||||
InnerConnector {
|
||||
@@ -373,91 +454,94 @@ mod connect_impl {
|
||||
|
||||
impl<T1, T2, Io1, Io2> Service for InnerConnector<T1, T2, Io1, Io2>
|
||||
where
|
||||
Io1: AsyncRead + AsyncWrite + 'static,
|
||||
Io2: AsyncRead + AsyncWrite + 'static,
|
||||
T1: Service<Request = Uri, Response = (Io1, Protocol), Error = ConnectError>,
|
||||
T2: Service<Request = Uri, Response = (Io2, Protocol), Error = ConnectError>,
|
||||
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
{
|
||||
type Request = Uri;
|
||||
type Request = Connect;
|
||||
type Response = EitherConnection<Io1, Io2>;
|
||||
type Error = ConnectError;
|
||||
type Future = Either<
|
||||
FutureResult<Self::Response, Self::Error>,
|
||||
Either<
|
||||
InnerConnectorResponseA<T1, Io1, Io2>,
|
||||
InnerConnectorResponseB<T2, Io1, Io2>,
|
||||
>,
|
||||
InnerConnectorResponseA<T1, Io1, Io2>,
|
||||
InnerConnectorResponseB<T2, Io1, Io2>,
|
||||
>;
|
||||
|
||||
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
|
||||
self.tcp_pool.poll_ready()
|
||||
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
self.tcp_pool.poll_ready(cx)
|
||||
}
|
||||
|
||||
fn call(&mut self, req: Uri) -> Self::Future {
|
||||
match req.scheme_str() {
|
||||
Some("https") | Some("wss") => {
|
||||
Either::B(Either::B(InnerConnectorResponseB {
|
||||
fut: self.ssl_pool.call(req),
|
||||
_t: PhantomData,
|
||||
}))
|
||||
}
|
||||
_ => Either::B(Either::A(InnerConnectorResponseA {
|
||||
fn call(&mut self, req: Connect) -> Self::Future {
|
||||
match req.uri.scheme_str() {
|
||||
Some("https") | Some("wss") => Either::Right(InnerConnectorResponseB {
|
||||
fut: self.ssl_pool.call(req),
|
||||
_t: PhantomData,
|
||||
}),
|
||||
_ => Either::Left(InnerConnectorResponseA {
|
||||
fut: self.tcp_pool.call(req),
|
||||
_t: PhantomData,
|
||||
})),
|
||||
}),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[pin_project::pin_project]
|
||||
pub(crate) struct InnerConnectorResponseA<T, Io1, Io2>
|
||||
where
|
||||
Io1: AsyncRead + AsyncWrite + 'static,
|
||||
T: Service<Request = Uri, Response = (Io1, Protocol), Error = ConnectError>,
|
||||
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
T: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
{
|
||||
#[pin]
|
||||
fut: <ConnectionPool<T, Io1> as Service>::Future,
|
||||
_t: PhantomData<Io2>,
|
||||
}
|
||||
|
||||
impl<T, Io1, Io2> Future for InnerConnectorResponseA<T, Io1, Io2>
|
||||
where
|
||||
T: Service<Request = Uri, Response = (Io1, Protocol), Error = ConnectError>,
|
||||
Io1: AsyncRead + AsyncWrite + 'static,
|
||||
Io2: AsyncRead + AsyncWrite + 'static,
|
||||
T: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
type Item = EitherConnection<Io1, Io2>;
|
||||
type Error = ConnectError;
|
||||
type Output = Result<EitherConnection<Io1, Io2>, ConnectError>;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
match self.fut.poll()? {
|
||||
Async::NotReady => Ok(Async::NotReady),
|
||||
Async::Ready(res) => Ok(Async::Ready(EitherConnection::A(res))),
|
||||
}
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
Poll::Ready(
|
||||
ready!(Pin::new(&mut self.get_mut().fut).poll(cx))
|
||||
.map(EitherConnection::A),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[pin_project::pin_project]
|
||||
pub(crate) struct InnerConnectorResponseB<T, Io1, Io2>
|
||||
where
|
||||
Io2: AsyncRead + AsyncWrite + 'static,
|
||||
T: Service<Request = Uri, Response = (Io2, Protocol), Error = ConnectError>,
|
||||
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
T: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
{
|
||||
#[pin]
|
||||
fut: <ConnectionPool<T, Io2> as Service>::Future,
|
||||
_t: PhantomData<Io1>,
|
||||
}
|
||||
|
||||
impl<T, Io1, Io2> Future for InnerConnectorResponseB<T, Io1, Io2>
|
||||
where
|
||||
T: Service<Request = Uri, Response = (Io2, Protocol), Error = ConnectError>,
|
||||
Io1: AsyncRead + AsyncWrite + 'static,
|
||||
Io2: AsyncRead + AsyncWrite + 'static,
|
||||
T: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
type Item = EitherConnection<Io1, Io2>;
|
||||
type Error = ConnectError;
|
||||
type Output = Result<EitherConnection<Io1, Io2>, ConnectError>;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
match self.fut.poll()? {
|
||||
Async::NotReady => Ok(Async::NotReady),
|
||||
Async::Ready(res) => Ok(Async::Ready(EitherConnection::B(res))),
|
||||
}
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
Poll::Ready(
|
||||
ready!(Pin::new(&mut self.get_mut().fut).poll(cx))
|
||||
.map(EitherConnection::B),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,14 +1,13 @@
|
||||
use std::io;
|
||||
|
||||
use actix_connect::resolver::ResolveError;
|
||||
use derive_more::{Display, From};
|
||||
use trust_dns_resolver::error::ResolveError;
|
||||
|
||||
#[cfg(feature = "ssl")]
|
||||
use openssl::ssl::{Error as SslError, HandshakeError};
|
||||
#[cfg(feature = "openssl")]
|
||||
use actix_connect::ssl::openssl::{HandshakeError, SslError};
|
||||
|
||||
use crate::error::{Error, ParseError, ResponseError};
|
||||
use crate::http::Error as HttpError;
|
||||
use crate::response::Response;
|
||||
use crate::http::{Error as HttpError, StatusCode};
|
||||
|
||||
/// A set of errors that can occur while connecting to an HTTP host
|
||||
#[derive(Debug, Display, From)]
|
||||
@@ -18,10 +17,15 @@ pub enum ConnectError {
|
||||
SslIsNotSupported,
|
||||
|
||||
/// SSL error
|
||||
#[cfg(feature = "ssl")]
|
||||
#[cfg(feature = "openssl")]
|
||||
#[display(fmt = "{}", _0)]
|
||||
SslError(SslError),
|
||||
|
||||
/// SSL Handshake error
|
||||
#[cfg(feature = "openssl")]
|
||||
#[display(fmt = "{}", _0)]
|
||||
SslHandshakeError(String),
|
||||
|
||||
/// Failed to resolve the hostname
|
||||
#[display(fmt = "Failed resolving hostname: {}", _0)]
|
||||
Resolver(ResolveError),
|
||||
@@ -35,7 +39,7 @@ pub enum ConnectError {
|
||||
H2(h2::Error),
|
||||
|
||||
/// Connecting took too long
|
||||
#[display(fmt = "Timeout out while establishing connection")]
|
||||
#[display(fmt = "Timeout while establishing connection")]
|
||||
Timeout,
|
||||
|
||||
/// Connector has been disconnected
|
||||
@@ -44,33 +48,31 @@ pub enum ConnectError {
|
||||
|
||||
/// Unresolved host name
|
||||
#[display(fmt = "Connector received `Connect` method with unresolved host")]
|
||||
Unresolverd,
|
||||
Unresolved,
|
||||
|
||||
/// Connection io error
|
||||
#[display(fmt = "{}", _0)]
|
||||
Io(io::Error),
|
||||
}
|
||||
|
||||
impl std::error::Error for ConnectError {}
|
||||
|
||||
impl From<actix_connect::ConnectError> for ConnectError {
|
||||
fn from(err: actix_connect::ConnectError) -> ConnectError {
|
||||
match err {
|
||||
actix_connect::ConnectError::Resolver(e) => ConnectError::Resolver(e),
|
||||
actix_connect::ConnectError::NoRecords => ConnectError::NoRecords,
|
||||
actix_connect::ConnectError::InvalidInput => panic!(),
|
||||
actix_connect::ConnectError::Unresolverd => ConnectError::Unresolverd,
|
||||
actix_connect::ConnectError::Unresolved => ConnectError::Unresolved,
|
||||
actix_connect::ConnectError::Io(e) => ConnectError::Io(e),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "ssl")]
|
||||
impl<T> From<HandshakeError<T>> for ConnectError {
|
||||
#[cfg(feature = "openssl")]
|
||||
impl<T: std::fmt::Debug> From<HandshakeError<T>> for ConnectError {
|
||||
fn from(err: HandshakeError<T>) -> ConnectError {
|
||||
match err {
|
||||
HandshakeError::SetupFailure(stack) => SslError::from(stack).into(),
|
||||
HandshakeError::Failure(stream) => stream.into_error().into(),
|
||||
HandshakeError::WouldBlock(stream) => stream.into_error().into(),
|
||||
}
|
||||
ConnectError::SslHandshakeError(format!("{:?}", err))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -86,6 +88,8 @@ pub enum InvalidUrl {
|
||||
HttpError(http::Error),
|
||||
}
|
||||
|
||||
impl std::error::Error for InvalidUrl {}
|
||||
|
||||
/// A set of errors that can occur during request sending and response reading
|
||||
#[derive(Debug, Display, From)]
|
||||
pub enum SendRequestError {
|
||||
@@ -106,7 +110,7 @@ pub enum SendRequestError {
|
||||
#[display(fmt = "{}", _0)]
|
||||
H2(h2::Error),
|
||||
/// Response took too long
|
||||
#[display(fmt = "Timeout out while waiting for response")]
|
||||
#[display(fmt = "Timeout while waiting for response")]
|
||||
Timeout,
|
||||
/// Tunnels are not supported for http2 connection
|
||||
#[display(fmt = "Tunnels are not supported for http2 connection")]
|
||||
@@ -115,16 +119,39 @@ pub enum SendRequestError {
|
||||
Body(Error),
|
||||
}
|
||||
|
||||
impl std::error::Error for SendRequestError {}
|
||||
|
||||
/// Convert `SendRequestError` to a server `Response`
|
||||
impl ResponseError for SendRequestError {
|
||||
fn error_response(&self) -> Response {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match *self {
|
||||
SendRequestError::Connect(ConnectError::Timeout) => {
|
||||
Response::GatewayTimeout()
|
||||
StatusCode::GATEWAY_TIMEOUT
|
||||
}
|
||||
SendRequestError::Connect(_) => Response::BadGateway(),
|
||||
_ => Response::InternalServerError(),
|
||||
SendRequestError::Connect(_) => StatusCode::BAD_REQUEST,
|
||||
_ => StatusCode::INTERNAL_SERVER_ERROR,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A set of errors that can occur during freezing a request
|
||||
#[derive(Debug, Display, From)]
|
||||
pub enum FreezeRequestError {
|
||||
/// Invalid URL
|
||||
#[display(fmt = "Invalid URL: {}", _0)]
|
||||
Url(InvalidUrl),
|
||||
/// Http error
|
||||
#[display(fmt = "{}", _0)]
|
||||
Http(HttpError),
|
||||
}
|
||||
|
||||
impl std::error::Error for FreezeRequestError {}
|
||||
|
||||
impl From<FreezeRequestError> for SendRequestError {
|
||||
fn from(e: FreezeRequestError) -> Self {
|
||||
match e {
|
||||
FreezeRequestError::Url(e) => e.into(),
|
||||
FreezeRequestError::Http(e) => e.into(),
|
||||
}
|
||||
.into()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,13 +1,20 @@
|
||||
use std::{io, time};
|
||||
use std::io::Write;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use std::{io, mem, time};
|
||||
|
||||
use actix_codec::{AsyncRead, AsyncWrite, Framed};
|
||||
use bytes::Bytes;
|
||||
use futures::future::{ok, Either};
|
||||
use futures::{Async, Future, Poll, Sink, Stream};
|
||||
use bytes::buf::BufMutExt;
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use futures_core::Stream;
|
||||
use futures_util::future::poll_fn;
|
||||
use futures_util::{pin_mut, SinkExt, StreamExt};
|
||||
|
||||
use crate::error::PayloadError;
|
||||
use crate::h1;
|
||||
use crate::message::{RequestHead, ResponseHead};
|
||||
use crate::header::HeaderMap;
|
||||
use crate::http::header::{IntoHeaderValue, HOST};
|
||||
use crate::message::{RequestHeadType, ResponseHead};
|
||||
use crate::payload::{Payload, PayloadStream};
|
||||
|
||||
use super::connection::{ConnectionLifetime, ConnectionType, IoConnection};
|
||||
@@ -15,98 +22,162 @@ use super::error::{ConnectError, SendRequestError};
|
||||
use super::pool::Acquired;
|
||||
use crate::body::{BodySize, MessageBody};
|
||||
|
||||
pub(crate) fn send_request<T, B>(
|
||||
pub(crate) async fn send_request<T, B>(
|
||||
io: T,
|
||||
head: RequestHead,
|
||||
mut head: RequestHeadType,
|
||||
body: B,
|
||||
created: time::Instant,
|
||||
pool: Option<Acquired<T>>,
|
||||
) -> impl Future<Item = (ResponseHead, Payload), Error = SendRequestError>
|
||||
) -> Result<(ResponseHead, Payload), SendRequestError>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite + 'static,
|
||||
T: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
B: MessageBody,
|
||||
{
|
||||
// set request host header
|
||||
if !head.as_ref().headers.contains_key(HOST)
|
||||
&& !head.extra_headers().iter().any(|h| h.contains_key(HOST))
|
||||
{
|
||||
if let Some(host) = head.as_ref().uri.host() {
|
||||
let mut wrt = BytesMut::with_capacity(host.len() + 5).writer();
|
||||
|
||||
let _ = match head.as_ref().uri.port_u16() {
|
||||
None | Some(80) | Some(443) => write!(wrt, "{}", host),
|
||||
Some(port) => write!(wrt, "{}:{}", host, port),
|
||||
};
|
||||
|
||||
match wrt.get_mut().split().freeze().try_into() {
|
||||
Ok(value) => match head {
|
||||
RequestHeadType::Owned(ref mut head) => {
|
||||
head.headers.insert(HOST, value)
|
||||
}
|
||||
RequestHeadType::Rc(_, ref mut extra_headers) => {
|
||||
let headers = extra_headers.get_or_insert(HeaderMap::new());
|
||||
headers.insert(HOST, value)
|
||||
}
|
||||
},
|
||||
Err(e) => log::error!("Can not set HOST header {}", e),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let io = H1Connection {
|
||||
created,
|
||||
pool,
|
||||
io: Some(io),
|
||||
};
|
||||
|
||||
let len = body.length();
|
||||
// create Framed and send request
|
||||
let mut framed_inner = Framed::new(io, h1::ClientCodec::default());
|
||||
framed_inner.send((head, body.size()).into()).await?;
|
||||
|
||||
// create Framed and send request
|
||||
Framed::new(io, h1::ClientCodec::default())
|
||||
.send((head, len).into())
|
||||
.from_err()
|
||||
// send request body
|
||||
.and_then(move |framed| match body.length() {
|
||||
BodySize::None | BodySize::Empty | BodySize::Sized(0) => {
|
||||
Either::A(ok(framed))
|
||||
}
|
||||
_ => Either::B(SendBody::new(body, framed)),
|
||||
})
|
||||
// read response and init read body
|
||||
.and_then(|framed| {
|
||||
framed
|
||||
.into_future()
|
||||
.map_err(|(e, _)| SendRequestError::from(e))
|
||||
.and_then(|(item, framed)| {
|
||||
if let Some(res) = item {
|
||||
match framed.get_codec().message_type() {
|
||||
h1::MessageType::None => {
|
||||
let force_close = !framed.get_codec().keepalive();
|
||||
release_connection(framed, force_close);
|
||||
Ok((res, Payload::None))
|
||||
}
|
||||
_ => {
|
||||
let pl: PayloadStream = Box::new(PlStream::new(framed));
|
||||
Ok((res, pl.into()))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
Err(ConnectError::Disconnected.into())
|
||||
}
|
||||
})
|
||||
})
|
||||
// send request body
|
||||
match body.size() {
|
||||
BodySize::None | BodySize::Empty | BodySize::Sized(0) => (),
|
||||
_ => send_body(body, Pin::new(&mut framed_inner)).await?,
|
||||
};
|
||||
|
||||
// read response and init read body
|
||||
let res = Pin::new(&mut framed_inner).into_future().await;
|
||||
let (head, framed) = if let (Some(result), framed) = res {
|
||||
let item = result.map_err(SendRequestError::from)?;
|
||||
(item, framed)
|
||||
} else {
|
||||
return Err(SendRequestError::from(ConnectError::Disconnected));
|
||||
};
|
||||
|
||||
match framed.codec_ref().message_type() {
|
||||
h1::MessageType::None => {
|
||||
let force_close = !framed.codec_ref().keepalive();
|
||||
release_connection(framed, force_close);
|
||||
Ok((head, Payload::None))
|
||||
}
|
||||
_ => {
|
||||
let pl: PayloadStream = PlStream::new(framed_inner).boxed_local();
|
||||
Ok((head, pl.into()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn open_tunnel<T>(
|
||||
pub(crate) async fn open_tunnel<T>(
|
||||
io: T,
|
||||
head: RequestHead,
|
||||
) -> impl Future<Item = (ResponseHead, Framed<T, h1::ClientCodec>), Error = SendRequestError>
|
||||
head: RequestHeadType,
|
||||
) -> Result<(ResponseHead, Framed<T, h1::ClientCodec>), SendRequestError>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite + 'static,
|
||||
T: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
// create Framed and send request
|
||||
Framed::new(io, h1::ClientCodec::default())
|
||||
.send((head, BodySize::None).into())
|
||||
.from_err()
|
||||
// read response
|
||||
.and_then(|framed| {
|
||||
framed
|
||||
.into_future()
|
||||
.map_err(|(e, _)| SendRequestError::from(e))
|
||||
.and_then(|(head, framed)| {
|
||||
if let Some(head) = head {
|
||||
Ok((head, framed))
|
||||
// create Framed and send request
|
||||
let mut framed = Framed::new(io, h1::ClientCodec::default());
|
||||
framed.send((head, BodySize::None).into()).await?;
|
||||
|
||||
// read response
|
||||
if let (Some(result), framed) = framed.into_future().await {
|
||||
let head = result.map_err(SendRequestError::from)?;
|
||||
Ok((head, framed))
|
||||
} else {
|
||||
Err(SendRequestError::from(ConnectError::Disconnected))
|
||||
}
|
||||
}
|
||||
|
||||
/// send request body to the peer
|
||||
pub(crate) async fn send_body<T, B>(
|
||||
body: B,
|
||||
mut framed: Pin<&mut Framed<T, h1::ClientCodec>>,
|
||||
) -> Result<(), SendRequestError>
|
||||
where
|
||||
T: ConnectionLifetime + Unpin,
|
||||
B: MessageBody,
|
||||
{
|
||||
pin_mut!(body);
|
||||
|
||||
let mut eof = false;
|
||||
while !eof {
|
||||
while !eof && !framed.as_ref().is_write_buf_full() {
|
||||
match poll_fn(|cx| body.as_mut().poll_next(cx)).await {
|
||||
Some(result) => {
|
||||
framed.as_mut().write(h1::Message::Chunk(Some(result?)))?;
|
||||
}
|
||||
None => {
|
||||
eof = true;
|
||||
framed.as_mut().write(h1::Message::Chunk(None))?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !framed.as_ref().is_write_buf_empty() {
|
||||
poll_fn(|cx| match framed.as_mut().flush(cx) {
|
||||
Poll::Ready(Ok(_)) => Poll::Ready(Ok(())),
|
||||
Poll::Ready(Err(err)) => Poll::Ready(Err(err)),
|
||||
Poll::Pending => {
|
||||
if !framed.as_ref().is_write_buf_full() {
|
||||
Poll::Ready(Ok(()))
|
||||
} else {
|
||||
Err(SendRequestError::from(ConnectError::Disconnected))
|
||||
Poll::Pending
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
})
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
|
||||
SinkExt::flush(Pin::into_inner(framed)).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
/// HTTP client connection
|
||||
pub struct H1Connection<T> {
|
||||
/// T should be `Unpin`
|
||||
io: Option<T>,
|
||||
created: time::Instant,
|
||||
pool: Option<Acquired<T>>,
|
||||
}
|
||||
|
||||
impl<T: AsyncRead + AsyncWrite + 'static> ConnectionLifetime for H1Connection<T> {
|
||||
impl<T> ConnectionLifetime for H1Connection<T>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
/// Close connection
|
||||
fn close(&mut self) {
|
||||
fn close(mut self: Pin<&mut Self>) {
|
||||
if let Some(mut pool) = self.pool.take() {
|
||||
if let Some(io) = self.io.take() {
|
||||
pool.close(IoConnection::new(
|
||||
@@ -119,7 +190,7 @@ impl<T: AsyncRead + AsyncWrite + 'static> ConnectionLifetime for H1Connection<T>
|
||||
}
|
||||
|
||||
/// Release this connection to the connection pool
|
||||
fn release(&mut self) {
|
||||
fn release(mut self: Pin<&mut Self>) {
|
||||
if let Some(mut pool) = self.pool.take() {
|
||||
if let Some(io) = self.io.take() {
|
||||
pool.release(IoConnection::new(
|
||||
@@ -132,143 +203,96 @@ impl<T: AsyncRead + AsyncWrite + 'static> ConnectionLifetime for H1Connection<T>
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: AsyncRead + AsyncWrite + 'static> io::Read for H1Connection<T> {
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
self.io.as_mut().unwrap().read(buf)
|
||||
impl<T: AsyncRead + AsyncWrite + Unpin + 'static> AsyncRead for H1Connection<T> {
|
||||
unsafe fn prepare_uninitialized_buffer(
|
||||
&self,
|
||||
buf: &mut [mem::MaybeUninit<u8>],
|
||||
) -> bool {
|
||||
self.io.as_ref().unwrap().prepare_uninitialized_buffer(buf)
|
||||
}
|
||||
|
||||
fn poll_read(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &mut [u8],
|
||||
) -> Poll<io::Result<usize>> {
|
||||
Pin::new(&mut self.io.as_mut().unwrap()).poll_read(cx, buf)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: AsyncRead + AsyncWrite + 'static> AsyncRead for H1Connection<T> {}
|
||||
|
||||
impl<T: AsyncRead + AsyncWrite + 'static> io::Write for H1Connection<T> {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
self.io.as_mut().unwrap().write(buf)
|
||||
impl<T: AsyncRead + AsyncWrite + Unpin + 'static> AsyncWrite for H1Connection<T> {
|
||||
fn poll_write(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &[u8],
|
||||
) -> Poll<io::Result<usize>> {
|
||||
Pin::new(&mut self.io.as_mut().unwrap()).poll_write(cx, buf)
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
self.io.as_mut().unwrap().flush()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: AsyncRead + AsyncWrite + 'static> AsyncWrite for H1Connection<T> {
|
||||
fn shutdown(&mut self) -> Poll<(), io::Error> {
|
||||
self.io.as_mut().unwrap().shutdown()
|
||||
}
|
||||
}
|
||||
|
||||
/// Future responsible for sending request body to the peer
|
||||
pub(crate) struct SendBody<I, B> {
|
||||
body: Option<B>,
|
||||
framed: Option<Framed<I, h1::ClientCodec>>,
|
||||
flushed: bool,
|
||||
}
|
||||
|
||||
impl<I, B> SendBody<I, B>
|
||||
where
|
||||
I: AsyncRead + AsyncWrite + 'static,
|
||||
B: MessageBody,
|
||||
{
|
||||
pub(crate) fn new(body: B, framed: Framed<I, h1::ClientCodec>) -> Self {
|
||||
SendBody {
|
||||
body: Some(body),
|
||||
framed: Some(framed),
|
||||
flushed: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<I, B> Future for SendBody<I, B>
|
||||
where
|
||||
I: ConnectionLifetime,
|
||||
B: MessageBody,
|
||||
{
|
||||
type Item = Framed<I, h1::ClientCodec>;
|
||||
type Error = SendRequestError;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
let mut body_ready = true;
|
||||
loop {
|
||||
while body_ready
|
||||
&& self.body.is_some()
|
||||
&& !self.framed.as_ref().unwrap().is_write_buf_full()
|
||||
{
|
||||
match self.body.as_mut().unwrap().poll_next()? {
|
||||
Async::Ready(item) => {
|
||||
// check if body is done
|
||||
if item.is_none() {
|
||||
let _ = self.body.take();
|
||||
}
|
||||
self.flushed = false;
|
||||
self.framed
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.force_send(h1::Message::Chunk(item))?;
|
||||
break;
|
||||
}
|
||||
Async::NotReady => body_ready = false,
|
||||
}
|
||||
}
|
||||
|
||||
if !self.flushed {
|
||||
match self.framed.as_mut().unwrap().poll_complete()? {
|
||||
Async::Ready(_) => {
|
||||
self.flushed = true;
|
||||
continue;
|
||||
}
|
||||
Async::NotReady => return Ok(Async::NotReady),
|
||||
}
|
||||
}
|
||||
|
||||
if self.body.is_none() {
|
||||
return Ok(Async::Ready(self.framed.take().unwrap()));
|
||||
}
|
||||
return Ok(Async::NotReady);
|
||||
}
|
||||
fn poll_flush(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<io::Result<()>> {
|
||||
Pin::new(self.io.as_mut().unwrap()).poll_flush(cx)
|
||||
}
|
||||
|
||||
fn poll_shutdown(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Result<(), io::Error>> {
|
||||
Pin::new(self.io.as_mut().unwrap()).poll_shutdown(cx)
|
||||
}
|
||||
}
|
||||
|
||||
#[pin_project::pin_project]
|
||||
pub(crate) struct PlStream<Io> {
|
||||
#[pin]
|
||||
framed: Option<Framed<Io, h1::ClientPayloadCodec>>,
|
||||
}
|
||||
|
||||
impl<Io: ConnectionLifetime> PlStream<Io> {
|
||||
fn new(framed: Framed<Io, h1::ClientCodec>) -> Self {
|
||||
let framed = framed.into_map_codec(|codec| codec.into_payload_codec());
|
||||
|
||||
PlStream {
|
||||
framed: Some(framed.map_codec(|codec| codec.into_payload_codec())),
|
||||
framed: Some(framed),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<Io: ConnectionLifetime> Stream for PlStream<Io> {
|
||||
type Item = Bytes;
|
||||
type Error = PayloadError;
|
||||
type Item = Result<Bytes, PayloadError>;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
match self.framed.as_mut().unwrap().poll()? {
|
||||
Async::NotReady => Ok(Async::NotReady),
|
||||
Async::Ready(Some(chunk)) => {
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Self::Item>> {
|
||||
let mut this = self.project();
|
||||
|
||||
match this.framed.as_mut().as_pin_mut().unwrap().next_item(cx)? {
|
||||
Poll::Pending => Poll::Pending,
|
||||
Poll::Ready(Some(chunk)) => {
|
||||
if let Some(chunk) = chunk {
|
||||
Ok(Async::Ready(Some(chunk)))
|
||||
Poll::Ready(Some(Ok(chunk)))
|
||||
} else {
|
||||
let framed = self.framed.take().unwrap();
|
||||
let force_close = framed.get_codec().keepalive();
|
||||
let framed = this.framed.as_mut().as_pin_mut().unwrap();
|
||||
let force_close = !framed.codec_ref().keepalive();
|
||||
release_connection(framed, force_close);
|
||||
Ok(Async::Ready(None))
|
||||
Poll::Ready(None)
|
||||
}
|
||||
}
|
||||
Async::Ready(None) => Ok(Async::Ready(None)),
|
||||
Poll::Ready(None) => Poll::Ready(None),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn release_connection<T, U>(framed: Framed<T, U>, force_close: bool)
|
||||
fn release_connection<T, U>(framed: Pin<&mut Framed<T, U>>, force_close: bool)
|
||||
where
|
||||
T: ConnectionLifetime,
|
||||
{
|
||||
let mut parts = framed.into_parts();
|
||||
if !force_close && parts.read_buf.is_empty() && parts.write_buf.is_empty() {
|
||||
parts.io.release()
|
||||
if !force_close && framed.is_read_buf_empty() && framed.is_write_buf_empty() {
|
||||
framed.io_pin().release()
|
||||
} else {
|
||||
parts.io.close()
|
||||
framed.io_pin().close()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,174 +1,178 @@
|
||||
use std::convert::TryFrom;
|
||||
use std::future::Future;
|
||||
use std::time;
|
||||
|
||||
use actix_codec::{AsyncRead, AsyncWrite};
|
||||
use bytes::Bytes;
|
||||
use futures::future::{err, Either};
|
||||
use futures::{Async, Future, Poll};
|
||||
use h2::{client::SendRequest, SendStream};
|
||||
use futures_util::future::poll_fn;
|
||||
use futures_util::pin_mut;
|
||||
use h2::{
|
||||
client::{Builder, Connection, SendRequest},
|
||||
SendStream,
|
||||
};
|
||||
use http::header::{HeaderValue, CONNECTION, CONTENT_LENGTH, TRANSFER_ENCODING};
|
||||
use http::{request::Request, HttpTryFrom, Method, Version};
|
||||
use http::{request::Request, Method, Version};
|
||||
|
||||
use crate::body::{BodySize, MessageBody};
|
||||
use crate::message::{RequestHead, ResponseHead};
|
||||
use crate::header::HeaderMap;
|
||||
use crate::message::{RequestHeadType, ResponseHead};
|
||||
use crate::payload::Payload;
|
||||
|
||||
use super::config::ConnectorConfig;
|
||||
use super::connection::{ConnectionType, IoConnection};
|
||||
use super::error::SendRequestError;
|
||||
use super::pool::Acquired;
|
||||
|
||||
pub(crate) fn send_request<T, B>(
|
||||
io: SendRequest<Bytes>,
|
||||
head: RequestHead,
|
||||
pub(crate) async fn send_request<T, B>(
|
||||
mut io: SendRequest<Bytes>,
|
||||
head: RequestHeadType,
|
||||
body: B,
|
||||
created: time::Instant,
|
||||
pool: Option<Acquired<T>>,
|
||||
) -> impl Future<Item = (ResponseHead, Payload), Error = SendRequestError>
|
||||
) -> Result<(ResponseHead, Payload), SendRequestError>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite + 'static,
|
||||
T: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
B: MessageBody,
|
||||
{
|
||||
trace!("Sending client request: {:?} {:?}", head, body.length());
|
||||
let head_req = head.method == Method::HEAD;
|
||||
let length = body.length();
|
||||
let eof = match length {
|
||||
BodySize::None | BodySize::Empty | BodySize::Sized(0) => true,
|
||||
_ => false,
|
||||
trace!("Sending client request: {:?} {:?}", head, body.size());
|
||||
let head_req = head.as_ref().method == Method::HEAD;
|
||||
let length = body.size();
|
||||
let eof = matches!(
|
||||
length,
|
||||
BodySize::None | BodySize::Empty | BodySize::Sized(0)
|
||||
);
|
||||
|
||||
let mut req = Request::new(());
|
||||
*req.uri_mut() = head.as_ref().uri.clone();
|
||||
*req.method_mut() = head.as_ref().method.clone();
|
||||
*req.version_mut() = Version::HTTP_2;
|
||||
|
||||
let mut skip_len = true;
|
||||
// let mut has_date = false;
|
||||
|
||||
// Content length
|
||||
let _ = match length {
|
||||
BodySize::None => None,
|
||||
BodySize::Stream => {
|
||||
skip_len = false;
|
||||
None
|
||||
}
|
||||
BodySize::Empty => req
|
||||
.headers_mut()
|
||||
.insert(CONTENT_LENGTH, HeaderValue::from_static("0")),
|
||||
BodySize::Sized(len) => req.headers_mut().insert(
|
||||
CONTENT_LENGTH,
|
||||
HeaderValue::try_from(format!("{}", len)).unwrap(),
|
||||
),
|
||||
};
|
||||
|
||||
io.ready()
|
||||
.map_err(SendRequestError::from)
|
||||
.and_then(move |mut io| {
|
||||
let mut req = Request::new(());
|
||||
*req.uri_mut() = head.uri;
|
||||
*req.method_mut() = head.method;
|
||||
*req.version_mut() = Version::HTTP_2;
|
||||
// Extracting extra headers from RequestHeadType. HeaderMap::new() does not allocate.
|
||||
let (head, extra_headers) = match head {
|
||||
RequestHeadType::Owned(head) => (RequestHeadType::Owned(head), HeaderMap::new()),
|
||||
RequestHeadType::Rc(head, extra_headers) => (
|
||||
RequestHeadType::Rc(head, None),
|
||||
extra_headers.unwrap_or_else(HeaderMap::new),
|
||||
),
|
||||
};
|
||||
|
||||
let mut skip_len = true;
|
||||
// let mut has_date = false;
|
||||
// merging headers from head and extra headers.
|
||||
let headers = head
|
||||
.as_ref()
|
||||
.headers
|
||||
.iter()
|
||||
.filter(|(name, _)| !extra_headers.contains_key(*name))
|
||||
.chain(extra_headers.iter());
|
||||
|
||||
// Content length
|
||||
let _ = match length {
|
||||
BodySize::None => None,
|
||||
BodySize::Stream => {
|
||||
skip_len = false;
|
||||
None
|
||||
}
|
||||
BodySize::Empty => req
|
||||
.headers_mut()
|
||||
.insert(CONTENT_LENGTH, HeaderValue::from_static("0")),
|
||||
BodySize::Sized(len) => req.headers_mut().insert(
|
||||
CONTENT_LENGTH,
|
||||
HeaderValue::try_from(format!("{}", len)).unwrap(),
|
||||
),
|
||||
BodySize::Sized64(len) => req.headers_mut().insert(
|
||||
CONTENT_LENGTH,
|
||||
HeaderValue::try_from(format!("{}", len)).unwrap(),
|
||||
),
|
||||
};
|
||||
// copy headers
|
||||
for (key, value) in headers {
|
||||
match *key {
|
||||
CONNECTION | TRANSFER_ENCODING => continue, // http2 specific
|
||||
CONTENT_LENGTH if skip_len => continue,
|
||||
// DATE => has_date = true,
|
||||
_ => (),
|
||||
}
|
||||
req.headers_mut().append(key, value.clone());
|
||||
}
|
||||
|
||||
// copy headers
|
||||
for (key, value) in head.headers.iter() {
|
||||
match *key {
|
||||
CONNECTION | TRANSFER_ENCODING => continue, // http2 specific
|
||||
CONTENT_LENGTH if skip_len => continue,
|
||||
// DATE => has_date = true,
|
||||
_ => (),
|
||||
}
|
||||
req.headers_mut().append(key, value.clone());
|
||||
let res = poll_fn(|cx| io.poll_ready(cx)).await;
|
||||
if let Err(e) = res {
|
||||
release(io, pool, created, e.is_io());
|
||||
return Err(SendRequestError::from(e));
|
||||
}
|
||||
|
||||
let resp = match io.send_request(req, eof) {
|
||||
Ok((fut, send)) => {
|
||||
release(io, pool, created, false);
|
||||
|
||||
if !eof {
|
||||
send_body(body, send).await?;
|
||||
}
|
||||
fut.await.map_err(SendRequestError::from)?
|
||||
}
|
||||
Err(e) => {
|
||||
release(io, pool, created, e.is_io());
|
||||
return Err(e.into());
|
||||
}
|
||||
};
|
||||
|
||||
match io.send_request(req, eof) {
|
||||
Ok((res, send)) => {
|
||||
release(io, pool, created, false);
|
||||
let (parts, body) = resp.into_parts();
|
||||
let payload = if head_req { Payload::None } else { body.into() };
|
||||
|
||||
if !eof {
|
||||
Either::A(Either::B(
|
||||
SendBody {
|
||||
body,
|
||||
send,
|
||||
buf: None,
|
||||
}
|
||||
.and_then(move |_| res.map_err(SendRequestError::from)),
|
||||
))
|
||||
} else {
|
||||
Either::B(res.map_err(SendRequestError::from))
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
release(io, pool, created, e.is_io());
|
||||
Either::A(Either::A(err(e.into())))
|
||||
}
|
||||
}
|
||||
})
|
||||
.and_then(move |resp| {
|
||||
let (parts, body) = resp.into_parts();
|
||||
let payload = if head_req { Payload::None } else { body.into() };
|
||||
|
||||
let mut head = ResponseHead::new(parts.status);
|
||||
head.version = parts.version;
|
||||
head.headers = parts.headers.into();
|
||||
|
||||
Ok((head, payload))
|
||||
})
|
||||
.from_err()
|
||||
let mut head = ResponseHead::new(parts.status);
|
||||
head.version = parts.version;
|
||||
head.headers = parts.headers.into();
|
||||
Ok((head, payload))
|
||||
}
|
||||
|
||||
struct SendBody<B: MessageBody> {
|
||||
async fn send_body<B: MessageBody>(
|
||||
body: B,
|
||||
send: SendStream<Bytes>,
|
||||
buf: Option<Bytes>,
|
||||
}
|
||||
|
||||
impl<B: MessageBody> Future for SendBody<B> {
|
||||
type Item = ();
|
||||
type Error = SendRequestError;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
loop {
|
||||
if self.buf.is_none() {
|
||||
match self.body.poll_next() {
|
||||
Ok(Async::Ready(Some(buf))) => {
|
||||
self.send.reserve_capacity(buf.len());
|
||||
self.buf = Some(buf);
|
||||
}
|
||||
Ok(Async::Ready(None)) => {
|
||||
if let Err(e) = self.send.send_data(Bytes::new(), true) {
|
||||
return Err(e.into());
|
||||
}
|
||||
self.send.reserve_capacity(0);
|
||||
return Ok(Async::Ready(()));
|
||||
}
|
||||
Ok(Async::NotReady) => return Ok(Async::NotReady),
|
||||
Err(e) => return Err(e.into()),
|
||||
mut send: SendStream<Bytes>,
|
||||
) -> Result<(), SendRequestError> {
|
||||
let mut buf = None;
|
||||
pin_mut!(body);
|
||||
loop {
|
||||
if buf.is_none() {
|
||||
match poll_fn(|cx| body.as_mut().poll_next(cx)).await {
|
||||
Some(Ok(b)) => {
|
||||
send.reserve_capacity(b.len());
|
||||
buf = Some(b);
|
||||
}
|
||||
}
|
||||
|
||||
match self.send.poll_capacity() {
|
||||
Ok(Async::NotReady) => return Ok(Async::NotReady),
|
||||
Ok(Async::Ready(None)) => return Ok(Async::Ready(())),
|
||||
Ok(Async::Ready(Some(cap))) => {
|
||||
let mut buf = self.buf.take().unwrap();
|
||||
let len = buf.len();
|
||||
let bytes = buf.split_to(std::cmp::min(cap, len));
|
||||
|
||||
if let Err(e) = self.send.send_data(bytes, false) {
|
||||
Some(Err(e)) => return Err(e.into()),
|
||||
None => {
|
||||
if let Err(e) = send.send_data(Bytes::new(), true) {
|
||||
return Err(e.into());
|
||||
} else {
|
||||
if !buf.is_empty() {
|
||||
self.send.reserve_capacity(buf.len());
|
||||
self.buf = Some(buf);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
send.reserve_capacity(0);
|
||||
return Ok(());
|
||||
}
|
||||
Err(e) => return Err(e.into()),
|
||||
}
|
||||
}
|
||||
|
||||
match poll_fn(|cx| send.poll_capacity(cx)).await {
|
||||
None => return Ok(()),
|
||||
Some(Ok(cap)) => {
|
||||
let b = buf.as_mut().unwrap();
|
||||
let len = b.len();
|
||||
let bytes = b.split_to(std::cmp::min(cap, len));
|
||||
|
||||
if let Err(e) = send.send_data(bytes, false) {
|
||||
return Err(e.into());
|
||||
} else {
|
||||
if !b.is_empty() {
|
||||
send.reserve_capacity(b.len());
|
||||
} else {
|
||||
buf = None;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
Some(Err(e)) => return Err(e.into()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// release SendRequest object
|
||||
fn release<T: AsyncRead + AsyncWrite + 'static>(
|
||||
fn release<T: AsyncRead + AsyncWrite + Unpin + 'static>(
|
||||
io: SendRequest<Bytes>,
|
||||
pool: Option<Acquired<T>>,
|
||||
created: time::Instant,
|
||||
@@ -182,3 +186,18 @@ fn release<T: AsyncRead + AsyncWrite + 'static>(
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn handshake<Io>(
    io: Io,
    config: &ConnectorConfig,
) -> impl Future<Output = Result<(SendRequest<Bytes>, Connection<Io, Bytes>), h2::Error>>
where
    Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
    let mut builder = Builder::new();
    builder
        .initial_window_size(config.stream_window_size)
        .initial_connection_window_size(config.conn_window_size)
        .enable_push(false);
    builder.handshake(io)
}

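As a usage note, the future returned by `handshake` resolves to a `(SendRequest, Connection)` pair, and the `Connection` half must keep being polled for any stream to make progress. A hedged, crate-internal sketch of a caller (the helper name is illustrative; detaching the driver mirrors what the pool code below does with `actix_rt::spawn`):

    use actix_codec::{AsyncRead, AsyncWrite};
    use bytes::Bytes;
    use h2::client::SendRequest;

    // Illustrative helper: perform the h2 handshake, then detach the connection
    // driver onto the runtime so the SendRequest handle can be used on its own.
    async fn connect_h2<Io>(
        io: Io,
        config: &ConnectorConfig,
    ) -> Result<SendRequest<Bytes>, h2::Error>
    where
        Io: AsyncRead + AsyncWrite + Unpin + 'static,
    {
        let (send_request, connection) = handshake(io, config).await?;
        actix_rt::spawn(async move {
            // Errors from the driver surface on the in-flight requests, so drop them here.
            let _ = connection.await;
        });
        Ok(send_request)
    }
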
@@ -1,4 +1,7 @@
//! Http client api
use http::Uri;

mod config;
mod connection;
mod connector;
mod error;
@@ -8,4 +11,11 @@ mod pool;

pub use self::connection::Connection;
pub use self::connector::Connector;
pub use self::error::{ConnectError, InvalidUrl, SendRequestError};
pub use self::error::{ConnectError, FreezeRequestError, InvalidUrl, SendRequestError};
pub use self::pool::Protocol;

#[derive(Clone)]
pub struct Connect {
pub uri: Uri,
pub addr: Option<std::net::SocketAddr>,
}

@@ -1,28 +1,32 @@
|
||||
use std::cell::RefCell;
|
||||
use std::collections::VecDeque;
|
||||
use std::io;
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::rc::Rc;
|
||||
use std::task::{Context, Poll};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use actix_codec::{AsyncRead, AsyncWrite};
|
||||
use actix_rt::time::{delay_for, Delay};
|
||||
use actix_service::Service;
|
||||
use actix_utils::{oneshot, task::LocalWaker};
|
||||
use bytes::Bytes;
|
||||
use futures::future::{err, ok, Either, FutureResult};
|
||||
use futures::task::AtomicTask;
|
||||
use futures::unsync::oneshot;
|
||||
use futures::{Async, Future, Poll};
|
||||
use h2::client::{handshake, Handshake};
|
||||
use hashbrown::HashMap;
|
||||
use http::uri::{Authority, Uri};
|
||||
use futures_util::future::{poll_fn, FutureExt, LocalBoxFuture};
|
||||
use fxhash::FxHashMap;
|
||||
use h2::client::{Connection, SendRequest};
|
||||
use http::uri::Authority;
|
||||
use indexmap::IndexSet;
|
||||
use pin_project::pin_project;
|
||||
use slab::Slab;
|
||||
use tokio_timer::{sleep, Delay};
|
||||
|
||||
use super::config::ConnectorConfig;
|
||||
use super::connection::{ConnectionType, IoConnection};
|
||||
use super::error::ConnectError;
|
||||
use super::h2proto::handshake;
|
||||
use super::Connect;
|
||||
|
||||
#[allow(dead_code)]
|
||||
#[derive(Clone, Copy, PartialEq)]
|
||||
/// Protocol version
|
||||
pub enum Protocol {
|
||||
Http1,
|
||||
Http2,
|
||||
@@ -40,234 +44,202 @@ impl From<Authority> for Key {
|
||||
}
|
||||
|
||||
/// Connections pool
|
||||
pub(crate) struct ConnectionPool<T, Io: AsyncRead + AsyncWrite + 'static>(
|
||||
T,
|
||||
Rc<RefCell<Inner<Io>>>,
|
||||
);
|
||||
pub(crate) struct ConnectionPool<T, Io: 'static>(Rc<RefCell<T>>, Rc<RefCell<Inner<Io>>>);
|
||||
|
||||
impl<T, Io> ConnectionPool<T, Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + 'static,
|
||||
T: Service<Request = Uri, Response = (Io, Protocol), Error = ConnectError>,
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
{
|
||||
pub(crate) fn new(
|
||||
connector: T,
|
||||
conn_lifetime: Duration,
|
||||
conn_keep_alive: Duration,
|
||||
disconnect_timeout: Option<Duration>,
|
||||
limit: usize,
|
||||
) -> Self {
|
||||
ConnectionPool(
|
||||
connector,
|
||||
Rc::new(RefCell::new(Inner {
|
||||
conn_lifetime,
|
||||
conn_keep_alive,
|
||||
disconnect_timeout,
|
||||
limit,
|
||||
acquired: 0,
|
||||
waiters: Slab::new(),
|
||||
waiters_queue: IndexSet::new(),
|
||||
available: HashMap::new(),
|
||||
task: AtomicTask::new(),
|
||||
})),
|
||||
)
|
||||
pub(crate) fn new(connector: T, config: ConnectorConfig) -> Self {
|
||||
let connector_rc = Rc::new(RefCell::new(connector));
|
||||
let inner_rc = Rc::new(RefCell::new(Inner {
|
||||
config,
|
||||
acquired: 0,
|
||||
waiters: Slab::new(),
|
||||
waiters_queue: IndexSet::new(),
|
||||
available: FxHashMap::default(),
|
||||
waker: LocalWaker::new(),
|
||||
}));
|
||||
|
||||
// start support future
|
||||
actix_rt::spawn(ConnectorPoolSupport {
|
||||
connector: Rc::clone(&connector_rc),
|
||||
inner: Rc::clone(&inner_rc),
|
||||
});
|
||||
|
||||
ConnectionPool(connector_rc, inner_rc)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, Io> Clone for ConnectionPool<T, Io>
|
||||
where
|
||||
T: Clone,
|
||||
Io: AsyncRead + AsyncWrite + 'static,
|
||||
Io: 'static,
|
||||
{
|
||||
fn clone(&self) -> Self {
|
||||
ConnectionPool(self.0.clone(), self.1.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, Io> Service for ConnectionPool<T, Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + 'static,
|
||||
T: Service<Request = Uri, Response = (Io, Protocol), Error = ConnectError>,
|
||||
{
|
||||
type Request = Uri;
|
||||
type Response = IoConnection<Io>;
|
||||
type Error = ConnectError;
|
||||
type Future = Either<
|
||||
FutureResult<Self::Response, Self::Error>,
|
||||
Either<WaitForConnection<Io>, OpenConnection<T::Future, Io>>,
|
||||
>;
|
||||
|
||||
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
|
||||
self.0.poll_ready()
|
||||
}
|
||||
|
||||
fn call(&mut self, req: Uri) -> Self::Future {
|
||||
let key = if let Some(authority) = req.authority_part() {
|
||||
authority.clone().into()
|
||||
} else {
|
||||
return Either::A(err(ConnectError::Unresolverd));
|
||||
};
|
||||
|
||||
// acquire connection
|
||||
match self.1.as_ref().borrow_mut().acquire(&key) {
|
||||
Acquire::Acquired(io, created) => {
|
||||
// use existing connection
|
||||
Either::A(ok(IoConnection::new(
|
||||
io,
|
||||
created,
|
||||
Some(Acquired(key, Some(self.1.clone()))),
|
||||
)))
|
||||
}
|
||||
Acquire::NotAvailable => {
|
||||
// connection is not available, wait
|
||||
let (rx, token) = self.1.as_ref().borrow_mut().wait_for(req);
|
||||
Either::B(Either::A(WaitForConnection {
|
||||
rx,
|
||||
key,
|
||||
token,
|
||||
inner: Some(self.1.clone()),
|
||||
}))
|
||||
}
|
||||
Acquire::Available => {
|
||||
// open new connection
|
||||
Either::B(Either::B(OpenConnection::new(
|
||||
key,
|
||||
self.1.clone(),
|
||||
self.0.call(req),
|
||||
)))
|
||||
}
|
||||
}
|
||||
impl<T, Io> Drop for ConnectionPool<T, Io> {
|
||||
fn drop(&mut self) {
|
||||
// wake up the ConnectorPoolSupport when dropping so it can exit properly.
|
||||
self.1.borrow().waker.wake();
|
||||
}
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
pub struct WaitForConnection<Io>
|
||||
impl<T, Io> Service for ConnectionPool<T, Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + 'static,
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
{
|
||||
type Request = Connect;
|
||||
type Response = IoConnection<Io>;
|
||||
type Error = ConnectError;
|
||||
type Future = LocalBoxFuture<'static, Result<IoConnection<Io>, ConnectError>>;
|
||||
|
||||
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
self.0.poll_ready(cx)
|
||||
}
|
||||
|
||||
fn call(&mut self, req: Connect) -> Self::Future {
|
||||
let mut connector = self.0.clone();
|
||||
let inner = self.1.clone();
|
||||
|
||||
let fut = async move {
|
||||
let key = if let Some(authority) = req.uri.authority() {
|
||||
authority.clone().into()
|
||||
} else {
|
||||
return Err(ConnectError::Unresolved);
|
||||
};
|
||||
|
||||
// acquire connection
|
||||
match poll_fn(|cx| Poll::Ready(inner.borrow_mut().acquire(&key, cx))).await {
|
||||
Acquire::Acquired(io, created) => {
|
||||
// use existing connection
|
||||
Ok(IoConnection::new(
|
||||
io,
|
||||
created,
|
||||
Some(Acquired(key, Some(inner))),
|
||||
))
|
||||
}
|
||||
Acquire::Available => {
|
||||
// open tcp connection
|
||||
let (io, proto) = connector.call(req).await?;
|
||||
|
||||
let config = inner.borrow().config.clone();
|
||||
|
||||
let guard = OpenGuard::new(key, inner);
|
||||
|
||||
if proto == Protocol::Http1 {
|
||||
Ok(IoConnection::new(
|
||||
ConnectionType::H1(io),
|
||||
Instant::now(),
|
||||
Some(guard.consume()),
|
||||
))
|
||||
} else {
|
||||
let (snd, connection) = handshake(io, &config).await?;
|
||||
actix_rt::spawn(connection.map(|_| ()));
|
||||
Ok(IoConnection::new(
|
||||
ConnectionType::H2(snd),
|
||||
Instant::now(),
|
||||
Some(guard.consume()),
|
||||
))
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
// connection is not available, wait
|
||||
let (rx, token) = inner.borrow_mut().wait_for(req);
|
||||
|
||||
let guard = WaiterGuard::new(key, token, inner);
|
||||
let res = match rx.await {
|
||||
Err(_) => Err(ConnectError::Disconnected),
|
||||
Ok(res) => res,
|
||||
};
|
||||
guard.consume();
|
||||
res
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
fut.boxed_local()
|
||||
}
|
||||
}
|
||||
|
||||
struct WaiterGuard<Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
key: Key,
|
||||
token: usize,
|
||||
rx: oneshot::Receiver<Result<IoConnection<Io>, ConnectError>>,
|
||||
inner: Option<Rc<RefCell<Inner<Io>>>>,
|
||||
}
|
||||
|
||||
impl<Io> Drop for WaitForConnection<Io>
|
||||
impl<Io> WaiterGuard<Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + 'static,
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
fn new(key: Key, token: usize, inner: Rc<RefCell<Inner<Io>>>) -> Self {
|
||||
Self {
|
||||
key,
|
||||
token,
|
||||
inner: Some(inner),
|
||||
}
|
||||
}
|
||||
|
||||
fn consume(mut self) {
|
||||
let _ = self.inner.take();
|
||||
}
|
||||
}
|
||||
|
||||
impl<Io> Drop for WaiterGuard<Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
fn drop(&mut self) {
|
||||
if let Some(i) = self.inner.take() {
|
||||
let mut inner = i.as_ref().borrow_mut();
|
||||
inner.release_waiter(&self.key, self.token);
|
||||
inner.check_availibility();
|
||||
inner.check_availability();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<Io> Future for WaitForConnection<Io>
|
||||
struct OpenGuard<Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite,
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
type Item = IoConnection<Io>;
|
||||
type Error = ConnectError;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
match self.rx.poll() {
|
||||
Ok(Async::Ready(item)) => match item {
|
||||
Err(err) => Err(err),
|
||||
Ok(conn) => {
|
||||
let _ = self.inner.take();
|
||||
Ok(Async::Ready(conn))
|
||||
}
|
||||
},
|
||||
Ok(Async::NotReady) => Ok(Async::NotReady),
|
||||
Err(_) => {
|
||||
let _ = self.inner.take();
|
||||
Err(ConnectError::Disconnected)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
pub struct OpenConnection<F, Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + 'static,
|
||||
{
|
||||
fut: F,
|
||||
key: Key,
|
||||
h2: Option<Handshake<Io, Bytes>>,
|
||||
inner: Option<Rc<RefCell<Inner<Io>>>>,
|
||||
}
|
||||
|
||||
impl<F, Io> OpenConnection<F, Io>
|
||||
impl<Io> OpenGuard<Io>
|
||||
where
|
||||
F: Future<Item = (Io, Protocol), Error = ConnectError>,
|
||||
Io: AsyncRead + AsyncWrite + 'static,
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
fn new(key: Key, inner: Rc<RefCell<Inner<Io>>>, fut: F) -> Self {
|
||||
OpenConnection {
|
||||
fn new(key: Key, inner: Rc<RefCell<Inner<Io>>>) -> Self {
|
||||
Self {
|
||||
key,
|
||||
fut,
|
||||
inner: Some(inner),
|
||||
h2: None,
|
||||
}
|
||||
}
|
||||
|
||||
fn consume(mut self) -> Acquired<Io> {
|
||||
Acquired(self.key.clone(), self.inner.take())
|
||||
}
|
||||
}
|
||||
|
||||
impl<F, Io> Drop for OpenConnection<F, Io>
|
||||
impl<Io> Drop for OpenGuard<Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + 'static,
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
fn drop(&mut self) {
|
||||
if let Some(inner) = self.inner.take() {
|
||||
let mut inner = inner.as_ref().borrow_mut();
|
||||
if let Some(i) = self.inner.take() {
|
||||
let mut inner = i.as_ref().borrow_mut();
|
||||
inner.release();
|
||||
inner.check_availibility();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<F, Io> Future for OpenConnection<F, Io>
|
||||
where
|
||||
F: Future<Item = (Io, Protocol), Error = ConnectError>,
|
||||
Io: AsyncRead + AsyncWrite,
|
||||
{
|
||||
type Item = IoConnection<Io>;
|
||||
type Error = ConnectError;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
if let Some(ref mut h2) = self.h2 {
|
||||
return match h2.poll() {
|
||||
Ok(Async::Ready((snd, connection))) => {
|
||||
tokio_current_thread::spawn(connection.map_err(|_| ()));
|
||||
Ok(Async::Ready(IoConnection::new(
|
||||
ConnectionType::H2(snd),
|
||||
Instant::now(),
|
||||
Some(Acquired(self.key.clone(), self.inner.clone())),
|
||||
)))
|
||||
}
|
||||
Ok(Async::NotReady) => Ok(Async::NotReady),
|
||||
Err(e) => Err(e.into()),
|
||||
};
|
||||
}
|
||||
|
||||
match self.fut.poll() {
|
||||
Err(err) => Err(err),
|
||||
Ok(Async::Ready((io, proto))) => {
|
||||
let _ = self.inner.take();
|
||||
if proto == Protocol::Http1 {
|
||||
Ok(Async::Ready(IoConnection::new(
|
||||
ConnectionType::H1(io),
|
||||
Instant::now(),
|
||||
Some(Acquired(self.key.clone(), self.inner.clone())),
|
||||
)))
|
||||
} else {
|
||||
self.h2 = Some(handshake(io));
|
||||
self.poll()
|
||||
}
|
||||
}
|
||||
Ok(Async::NotReady) => Ok(Async::NotReady),
|
||||
inner.check_availability();
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -278,7 +250,6 @@ enum Acquire<T> {
|
||||
NotAvailable,
|
||||
}
|
||||
|
||||
// #[derive(Debug)]
|
||||
struct AvailableConnection<Io> {
|
||||
io: ConnectionType<Io>,
|
||||
used: Instant,
|
||||
@@ -286,15 +257,17 @@ struct AvailableConnection<Io> {
|
||||
}
|
||||
|
||||
pub(crate) struct Inner<Io> {
|
||||
conn_lifetime: Duration,
|
||||
conn_keep_alive: Duration,
|
||||
disconnect_timeout: Option<Duration>,
|
||||
limit: usize,
|
||||
config: ConnectorConfig,
|
||||
acquired: usize,
|
||||
available: HashMap<Key, VecDeque<AvailableConnection<Io>>>,
|
||||
waiters: Slab<(Uri, oneshot::Sender<Result<IoConnection<Io>, ConnectError>>)>,
|
||||
available: FxHashMap<Key, VecDeque<AvailableConnection<Io>>>,
|
||||
waiters: Slab<
|
||||
Option<(
|
||||
Connect,
|
||||
oneshot::Sender<Result<IoConnection<Io>, ConnectError>>,
|
||||
)>,
|
||||
>,
|
||||
waiters_queue: IndexSet<(Key, usize)>,
|
||||
task: AtomicTask,
|
||||
waker: LocalWaker,
|
||||
}
|
||||
|
||||
impl<Io> Inner<Io> {
|
||||
@@ -308,7 +281,79 @@ impl<Io> Inner<Io> {
|
||||
|
||||
fn release_waiter(&mut self, key: &Key, token: usize) {
|
||||
self.waiters.remove(token);
|
||||
self.waiters_queue.remove(&(key.clone(), token));
|
||||
let _ = self.waiters_queue.shift_remove(&(key.clone(), token));
|
||||
}
|
||||
}
|
||||
|
||||
impl<Io> Inner<Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
/// connection is not available, wait
|
||||
fn wait_for(
|
||||
&mut self,
|
||||
connect: Connect,
|
||||
) -> (
|
||||
oneshot::Receiver<Result<IoConnection<Io>, ConnectError>>,
|
||||
usize,
|
||||
) {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
|
||||
let key: Key = connect.uri.authority().unwrap().clone().into();
|
||||
let entry = self.waiters.vacant_entry();
|
||||
let token = entry.key();
|
||||
entry.insert(Some((connect, tx)));
|
||||
assert!(self.waiters_queue.insert((key, token)));
|
||||
|
||||
(rx, token)
|
||||
}
|
||||
|
||||
fn acquire(&mut self, key: &Key, cx: &mut Context<'_>) -> Acquire<Io> {
|
||||
// check limits
|
||||
if self.config.limit > 0 && self.acquired >= self.config.limit {
|
||||
return Acquire::NotAvailable;
|
||||
}
|
||||
|
||||
self.reserve();
|
||||
|
||||
// check if open connection is available
|
||||
// cleanup stale connections at the same time
|
||||
if let Some(ref mut connections) = self.available.get_mut(key) {
|
||||
let now = Instant::now();
|
||||
while let Some(conn) = connections.pop_back() {
|
||||
// check if it still usable
|
||||
if (now - conn.used) > self.config.conn_keep_alive
|
||||
|| (now - conn.created) > self.config.conn_lifetime
|
||||
{
|
||||
if let Some(timeout) = self.config.disconnect_timeout {
|
||||
if let ConnectionType::H1(io) = conn.io {
|
||||
actix_rt::spawn(CloseConnection::new(io, timeout))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let mut io = conn.io;
|
||||
let mut buf = [0; 2];
|
||||
if let ConnectionType::H1(ref mut s) = io {
|
||||
match Pin::new(s).poll_read(cx, &mut buf) {
|
||||
Poll::Pending => (),
|
||||
Poll::Ready(Ok(n)) if n > 0 => {
|
||||
if let Some(timeout) = self.config.disconnect_timeout {
|
||||
if let ConnectionType::H1(io) = io {
|
||||
actix_rt::spawn(CloseConnection::new(
|
||||
io, timeout,
|
||||
))
|
||||
}
|
||||
}
|
||||
continue;
|
||||
}
|
||||
_ => continue,
|
||||
}
|
||||
}
|
||||
return Acquire::Acquired(io, conn.created);
|
||||
}
|
||||
}
|
||||
}
|
||||
Acquire::Available
|
||||
}
|
||||
|
||||
fn release_conn(&mut self, key: &Key, io: ConnectionType<Io>, created: Instant) {
|
||||
@@ -321,93 +366,22 @@ impl<Io> Inner<Io> {
|
||||
created,
|
||||
used: Instant::now(),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
impl<Io> Inner<Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + 'static,
|
||||
{
|
||||
/// connection is not available, wait
|
||||
fn wait_for(
|
||||
&mut self,
|
||||
connect: Uri,
|
||||
) -> (
|
||||
oneshot::Receiver<Result<IoConnection<Io>, ConnectError>>,
|
||||
usize,
|
||||
) {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
|
||||
let key: Key = connect.authority_part().unwrap().clone().into();
|
||||
let entry = self.waiters.vacant_entry();
|
||||
let token = entry.key();
|
||||
entry.insert((connect, tx));
|
||||
assert!(!self.waiters_queue.insert((key, token)));
|
||||
(rx, token)
|
||||
}
|
||||
|
||||
fn acquire(&mut self, key: &Key) -> Acquire<Io> {
|
||||
// check limits
|
||||
if self.limit > 0 && self.acquired >= self.limit {
|
||||
return Acquire::NotAvailable;
|
||||
}
|
||||
|
||||
self.reserve();
|
||||
|
||||
// check if open connection is available
|
||||
// cleanup stale connections at the same time
|
||||
if let Some(ref mut connections) = self.available.get_mut(key) {
|
||||
let now = Instant::now();
|
||||
while let Some(conn) = connections.pop_back() {
|
||||
// check if it still usable
|
||||
if (now - conn.used) > self.conn_keep_alive
|
||||
|| (now - conn.created) > self.conn_lifetime
|
||||
{
|
||||
if let Some(timeout) = self.disconnect_timeout {
|
||||
if let ConnectionType::H1(io) = conn.io {
|
||||
tokio_current_thread::spawn(CloseConnection::new(
|
||||
io, timeout,
|
||||
))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let mut io = conn.io;
|
||||
let mut buf = [0; 2];
|
||||
if let ConnectionType::H1(ref mut s) = io {
|
||||
match s.read(&mut buf) {
|
||||
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => (),
|
||||
Ok(n) if n > 0 => {
|
||||
if let Some(timeout) = self.disconnect_timeout {
|
||||
if let ConnectionType::H1(io) = io {
|
||||
tokio_current_thread::spawn(
|
||||
CloseConnection::new(io, timeout),
|
||||
)
|
||||
}
|
||||
}
|
||||
continue;
|
||||
}
|
||||
Ok(_) | Err(_) => continue,
|
||||
}
|
||||
}
|
||||
return Acquire::Acquired(io, conn.created);
|
||||
}
|
||||
}
|
||||
}
|
||||
Acquire::Available
|
||||
self.check_availability();
|
||||
}
|
||||
|
||||
fn release_close(&mut self, io: ConnectionType<Io>) {
|
||||
self.acquired -= 1;
|
||||
if let Some(timeout) = self.disconnect_timeout {
|
||||
if let Some(timeout) = self.config.disconnect_timeout {
|
||||
if let ConnectionType::H1(io) = io {
|
||||
tokio_current_thread::spawn(CloseConnection::new(io, timeout))
|
||||
actix_rt::spawn(CloseConnection::new(io, timeout))
|
||||
}
|
||||
}
|
||||
self.check_availability();
|
||||
}
|
||||
|
||||
fn check_availibility(&self) {
|
||||
if !self.waiters_queue.is_empty() && self.acquired < self.limit {
|
||||
self.task.notify()
|
||||
fn check_availability(&self) {
|
||||
if !self.waiters_queue.is_empty() && self.acquired < self.config.limit {
|
||||
self.waker.wake();
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -419,39 +393,230 @@ struct CloseConnection<T> {
|
||||
|
||||
impl<T> CloseConnection<T>
|
||||
where
|
||||
T: AsyncWrite,
|
||||
T: AsyncWrite + Unpin,
|
||||
{
|
||||
fn new(io: T, timeout: Duration) -> Self {
|
||||
CloseConnection {
|
||||
io,
|
||||
timeout: sleep(timeout),
|
||||
timeout: delay_for(timeout),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Future for CloseConnection<T>
|
||||
where
|
||||
T: AsyncWrite,
|
||||
T: AsyncWrite + Unpin,
|
||||
{
|
||||
type Item = ();
|
||||
type Error = ();
|
||||
type Output = ();
|
||||
|
||||
fn poll(&mut self) -> Poll<(), ()> {
|
||||
match self.timeout.poll() {
|
||||
Ok(Async::Ready(_)) | Err(_) => Ok(Async::Ready(())),
|
||||
Ok(Async::NotReady) => match self.io.shutdown() {
|
||||
Ok(Async::Ready(_)) | Err(_) => Ok(Async::Ready(())),
|
||||
Ok(Async::NotReady) => Ok(Async::NotReady),
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
|
||||
let this = self.get_mut();
|
||||
|
||||
match Pin::new(&mut this.timeout).poll(cx) {
|
||||
Poll::Ready(_) => Poll::Ready(()),
|
||||
Poll::Pending => match Pin::new(&mut this.io).poll_shutdown(cx) {
|
||||
Poll::Ready(_) => Poll::Ready(()),
|
||||
Poll::Pending => Poll::Pending,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[pin_project]
|
||||
struct ConnectorPoolSupport<T, Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
connector: T,
|
||||
inner: Rc<RefCell<Inner<Io>>>,
|
||||
}
|
||||
|
||||
impl<T, Io> Future for ConnectorPoolSupport<T, Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>,
|
||||
T::Future: 'static,
|
||||
{
|
||||
type Output = ();
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let this = self.project();
|
||||
|
||||
if Rc::strong_count(this.inner) == 1 {
|
||||
// If we are last copy of Inner<Io> it means the ConnectionPool is already gone
|
||||
// and we are safe to exit.
|
||||
return Poll::Ready(());
|
||||
}
|
||||
|
||||
let mut inner = this.inner.borrow_mut();
|
||||
inner.waker.register(cx.waker());
|
||||
|
||||
// check waiters
|
||||
loop {
|
||||
let (key, token) = {
|
||||
if let Some((key, token)) = inner.waiters_queue.get_index(0) {
|
||||
(key.clone(), *token)
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
};
|
||||
if inner.waiters.get(token).unwrap().is_none() {
|
||||
continue;
|
||||
}
|
||||
|
||||
match inner.acquire(&key, cx) {
|
||||
Acquire::NotAvailable => break,
|
||||
Acquire::Acquired(io, created) => {
|
||||
let tx = inner.waiters.get_mut(token).unwrap().take().unwrap().1;
|
||||
if let Err(conn) = tx.send(Ok(IoConnection::new(
|
||||
io,
|
||||
created,
|
||||
Some(Acquired(key.clone(), Some(this.inner.clone()))),
|
||||
))) {
|
||||
let (io, created) = conn.unwrap().into_inner();
|
||||
inner.release_conn(&key, io, created);
|
||||
}
|
||||
}
|
||||
Acquire::Available => {
|
||||
let (connect, tx) =
|
||||
inner.waiters.get_mut(token).unwrap().take().unwrap();
|
||||
OpenWaitingConnection::spawn(
|
||||
key.clone(),
|
||||
tx,
|
||||
this.inner.clone(),
|
||||
this.connector.call(connect),
|
||||
inner.config.clone(),
|
||||
);
|
||||
}
|
||||
}
|
||||
let _ = inner.waiters_queue.swap_remove_index(0);
|
||||
}
|
||||
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
|
||||
#[pin_project::pin_project(PinnedDrop)]
|
||||
struct OpenWaitingConnection<F, Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
#[pin]
|
||||
fut: F,
|
||||
key: Key,
|
||||
h2: Option<
|
||||
LocalBoxFuture<
|
||||
'static,
|
||||
Result<(SendRequest<Bytes>, Connection<Io, Bytes>), h2::Error>,
|
||||
>,
|
||||
>,
|
||||
rx: Option<oneshot::Sender<Result<IoConnection<Io>, ConnectError>>>,
|
||||
inner: Option<Rc<RefCell<Inner<Io>>>>,
|
||||
config: ConnectorConfig,
|
||||
}
|
||||
|
||||
impl<F, Io> OpenWaitingConnection<F, Io>
|
||||
where
|
||||
F: Future<Output = Result<(Io, Protocol), ConnectError>> + 'static,
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
fn spawn(
|
||||
key: Key,
|
||||
rx: oneshot::Sender<Result<IoConnection<Io>, ConnectError>>,
|
||||
inner: Rc<RefCell<Inner<Io>>>,
|
||||
fut: F,
|
||||
config: ConnectorConfig,
|
||||
) {
|
||||
actix_rt::spawn(OpenWaitingConnection {
|
||||
key,
|
||||
fut,
|
||||
h2: None,
|
||||
rx: Some(rx),
|
||||
inner: Some(inner),
|
||||
config,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[pin_project::pinned_drop]
|
||||
impl<F, Io> PinnedDrop for OpenWaitingConnection<F, Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
fn drop(self: Pin<&mut Self>) {
|
||||
if let Some(inner) = self.project().inner.take() {
|
||||
let mut inner = inner.as_ref().borrow_mut();
|
||||
inner.release();
|
||||
inner.check_availability();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<F, Io> Future for OpenWaitingConnection<F, Io>
|
||||
where
|
||||
F: Future<Output = Result<(Io, Protocol), ConnectError>>,
|
||||
Io: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
type Output = ();
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let this = self.as_mut().project();
|
||||
|
||||
if let Some(ref mut h2) = this.h2 {
|
||||
return match Pin::new(h2).poll(cx) {
|
||||
Poll::Ready(Ok((snd, connection))) => {
|
||||
actix_rt::spawn(connection.map(|_| ()));
|
||||
let rx = this.rx.take().unwrap();
|
||||
let _ = rx.send(Ok(IoConnection::new(
|
||||
ConnectionType::H2(snd),
|
||||
Instant::now(),
|
||||
Some(Acquired(this.key.clone(), this.inner.take())),
|
||||
)));
|
||||
Poll::Ready(())
|
||||
}
|
||||
Poll::Pending => Poll::Pending,
|
||||
Poll::Ready(Err(err)) => {
|
||||
let _ = this.inner.take();
|
||||
if let Some(rx) = this.rx.take() {
|
||||
let _ = rx.send(Err(ConnectError::H2(err)));
|
||||
}
|
||||
Poll::Ready(())
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
match this.fut.poll(cx) {
|
||||
Poll::Ready(Err(err)) => {
|
||||
let _ = this.inner.take();
|
||||
if let Some(rx) = this.rx.take() {
|
||||
let _ = rx.send(Err(err));
|
||||
}
|
||||
Poll::Ready(())
|
||||
}
|
||||
Poll::Ready(Ok((io, proto))) => {
|
||||
if proto == Protocol::Http1 {
|
||||
let rx = this.rx.take().unwrap();
|
||||
let _ = rx.send(Ok(IoConnection::new(
|
||||
ConnectionType::H1(io),
|
||||
Instant::now(),
|
||||
Some(Acquired(this.key.clone(), this.inner.take())),
|
||||
)));
|
||||
Poll::Ready(())
|
||||
} else {
|
||||
*this.h2 = Some(handshake(io, this.config).boxed_local());
|
||||
self.poll(cx)
|
||||
}
|
||||
}
|
||||
Poll::Pending => Poll::Pending,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct Acquired<T>(Key, Option<Rc<RefCell<Inner<T>>>>);
|
||||
|
||||
impl<T> Acquired<T>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite + 'static,
|
||||
T: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
pub(crate) fn close(&mut self, conn: IoConnection<T>) {
|
||||
if let Some(inner) = self.1.take() {
|
||||
|
||||
actix-http/src/cloneable.rs (new file, 40 lines)
@@ -0,0 +1,40 @@
use std::cell::RefCell;
use std::rc::Rc;
use std::task::{Context, Poll};

use actix_service::Service;

#[doc(hidden)]
/// Service that allows to turn non-clone service to a service with `Clone` impl
///
/// # Panics
/// CloneableService might panic with some creative use of thread local storage.
/// See https://github.com/actix/actix-web/issues/1295 for example
pub(crate) struct CloneableService<T: Service>(Rc<RefCell<T>>);

impl<T: Service> CloneableService<T> {
    pub(crate) fn new(service: T) -> Self {
        Self(Rc::new(RefCell::new(service)))
    }
}

impl<T: Service> Clone for CloneableService<T> {
    fn clone(&self) -> Self {
        Self(self.0.clone())
    }
}

impl<T: Service> Service for CloneableService<T> {
    type Request = T::Request;
    type Response = T::Response;
    type Error = T::Error;
    type Future = T::Future;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.0.borrow_mut().poll_ready(cx)
    }

    fn call(&mut self, req: T::Request) -> Self::Future {
        self.0.borrow_mut().call(req)
    }
}

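A brief, crate-internal usage sketch (the helper is illustrative, not part of the file above): the wrapper is built once and then cloned cheaply wherever the service has to be owned by value; because the shared state lives in a `RefCell`, reentrant calls would panic, which is the caveat the doc comment points at.

    use actix_service::Service;

    // Illustrative helper: wrap a non-Clone service once and hand out cheap clones.
    // Both handles call into the same underlying service through the shared Rc.
    fn share<T: Service>(svc: T) -> (CloneableService<T>, CloneableService<T>) {
        let first = CloneableService::new(svc);
        let second = first.clone();
        (first, second)
    }
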
@@ -1,13 +1,13 @@
|
||||
use std::cell::UnsafeCell;
|
||||
use std::fmt;
|
||||
use std::cell::Cell;
|
||||
use std::fmt::Write;
|
||||
use std::rc::Rc;
|
||||
use std::time::{Duration, Instant};
|
||||
use std::time::Duration;
|
||||
use std::{fmt, net};
|
||||
|
||||
use actix_rt::time::{delay_for, delay_until, Delay, Instant};
|
||||
use bytes::BytesMut;
|
||||
use futures::{future, Future};
|
||||
use time;
|
||||
use tokio_timer::{sleep, Delay};
|
||||
use futures_util::{future, FutureExt};
|
||||
use time::OffsetDateTime;
|
||||
|
||||
// "Sun, 06 Nov 1994 08:49:37 GMT".len()
|
||||
const DATE_VALUE_LENGTH: usize = 29;
|
||||
@@ -17,7 +17,7 @@ const DATE_VALUE_LENGTH: usize = 29;
|
||||
pub enum KeepAlive {
|
||||
/// Keep alive in seconds
|
||||
Timeout(usize),
|
||||
/// Relay on OS to shutdown tcp connection
|
||||
/// Rely on OS to shutdown tcp connection
|
||||
Os,
|
||||
/// Disabled
|
||||
Disabled,
|
||||
@@ -47,6 +47,8 @@ struct Inner {
|
||||
client_timeout: u64,
|
||||
client_disconnect: u64,
|
||||
ka_enabled: bool,
|
||||
secure: bool,
|
||||
local_addr: Option<std::net::SocketAddr>,
|
||||
timer: DateService,
|
||||
}
|
||||
|
||||
@@ -58,7 +60,7 @@ impl Clone for ServiceConfig {
|
||||
|
||||
impl Default for ServiceConfig {
|
||||
fn default() -> Self {
|
||||
Self::new(KeepAlive::Timeout(5), 0, 0)
|
||||
Self::new(KeepAlive::Timeout(5), 0, 0, false, None)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -68,6 +70,8 @@ impl ServiceConfig {
|
||||
keep_alive: KeepAlive,
|
||||
client_timeout: u64,
|
||||
client_disconnect: u64,
|
||||
secure: bool,
|
||||
local_addr: Option<net::SocketAddr>,
|
||||
) -> ServiceConfig {
|
||||
let (keep_alive, ka_enabled) = match keep_alive {
|
||||
KeepAlive::Timeout(val) => (val as u64, true),
|
||||
@@ -85,10 +89,24 @@ impl ServiceConfig {
|
||||
ka_enabled,
|
||||
client_timeout,
|
||||
client_disconnect,
|
||||
secure,
|
||||
local_addr,
|
||||
timer: DateService::new(),
|
||||
}))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
/// Returns true if connection is secure(https)
|
||||
pub fn secure(&self) -> bool {
|
||||
self.0.secure
|
||||
}
|
||||
|
||||
#[inline]
|
||||
/// Returns the local address that this server is bound to.
|
||||
pub fn local_addr(&self) -> Option<net::SocketAddr> {
|
||||
self.0.local_addr
|
||||
}
|
||||
|
||||
#[inline]
|
||||
/// Keep alive duration if configured.
|
||||
pub fn keep_alive(&self) -> Option<Duration> {
|
||||
@@ -96,7 +114,7 @@ impl ServiceConfig {
|
||||
}
|
||||
|
||||
#[inline]
|
||||
/// Return state of connection keep-alive funcitonality
|
||||
/// Return state of connection keep-alive functionality
|
||||
pub fn keep_alive_enabled(&self) -> bool {
|
||||
self.0.ka_enabled
|
||||
}
|
||||
@@ -104,10 +122,10 @@ impl ServiceConfig {
|
||||
#[inline]
|
||||
/// Client timeout for first request.
|
||||
pub fn client_timer(&self) -> Option<Delay> {
|
||||
let delay = self.0.client_timeout;
|
||||
if delay != 0 {
|
||||
Some(Delay::new(
|
||||
self.0.timer.now() + Duration::from_millis(delay),
|
||||
let delay_time = self.0.client_timeout;
|
||||
if delay_time != 0 {
|
||||
Some(delay_until(
|
||||
self.0.timer.now() + Duration::from_millis(delay_time),
|
||||
))
|
||||
} else {
|
||||
None
|
||||
@@ -138,7 +156,7 @@ impl ServiceConfig {
|
||||
/// Return keep-alive timer delay is configured.
|
||||
pub fn keep_alive_timer(&self) -> Option<Delay> {
|
||||
if let Some(ka) = self.0.keep_alive {
|
||||
Some(Delay::new(self.0.timer.now() + ka))
|
||||
Some(delay_until(self.0.timer.now() + ka))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
@@ -158,19 +176,25 @@ impl ServiceConfig {
|
||||
self.0.timer.now()
|
||||
}
|
||||
|
||||
pub(crate) fn set_date(&self, dst: &mut BytesMut) {
|
||||
#[doc(hidden)]
|
||||
pub fn set_date(&self, dst: &mut BytesMut) {
|
||||
let mut buf: [u8; 39] = [0; 39];
|
||||
buf[..6].copy_from_slice(b"date: ");
|
||||
buf[6..35].copy_from_slice(&self.0.timer.date().bytes);
|
||||
self.0
|
||||
.timer
|
||||
.set_date(|date| buf[6..35].copy_from_slice(&date.bytes));
|
||||
buf[35..].copy_from_slice(b"\r\n\r\n");
|
||||
dst.extend_from_slice(&buf);
|
||||
}
|
||||
|
||||
pub(crate) fn set_date_header(&self, dst: &mut BytesMut) {
|
||||
dst.extend_from_slice(&self.0.timer.date().bytes);
|
||||
self.0
|
||||
.timer
|
||||
.set_date(|date| dst.extend_from_slice(&date.bytes));
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
struct Date {
|
||||
bytes: [u8; DATE_VALUE_LENGTH],
|
||||
pos: usize,
|
||||
@@ -185,9 +209,15 @@ impl Date {
|
||||
date.update();
|
||||
date
|
||||
}
|
||||
|
||||
fn update(&mut self) {
|
||||
self.pos = 0;
|
||||
write!(self, "{}", time::at_utc(time::get_time()).rfc822()).unwrap();
|
||||
write!(
|
||||
self,
|
||||
"{}",
|
||||
OffsetDateTime::now_utc().format("%a, %d %b %Y %H:%M:%S GMT")
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -204,28 +234,24 @@ impl fmt::Write for Date {
|
||||
struct DateService(Rc<DateServiceInner>);
|
||||
|
||||
struct DateServiceInner {
|
||||
current: UnsafeCell<Option<(Date, Instant)>>,
|
||||
current: Cell<Option<(Date, Instant)>>,
|
||||
}
|
||||
|
||||
impl DateServiceInner {
|
||||
fn new() -> Self {
|
||||
DateServiceInner {
|
||||
current: UnsafeCell::new(None),
|
||||
current: Cell::new(None),
|
||||
}
|
||||
}
|
||||
|
||||
fn get_ref(&self) -> &Option<(Date, Instant)> {
|
||||
unsafe { &*self.current.get() }
|
||||
}
|
||||
|
||||
fn reset(&self) {
|
||||
unsafe { (&mut *self.current.get()).take() };
|
||||
self.current.take();
|
||||
}
|
||||
|
||||
fn update(&self) {
|
||||
let now = Instant::now();
|
||||
let date = Date::new();
|
||||
*(unsafe { &mut *self.current.get() }) = Some((date, now));
|
||||
self.current.set(Some((date, now)));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -235,56 +261,55 @@ impl DateService {
|
||||
}
|
||||
|
||||
fn check_date(&self) {
|
||||
if self.0.get_ref().is_none() {
|
||||
if self.0.current.get().is_none() {
|
||||
self.0.update();
|
||||
|
||||
// periodic date update
|
||||
let s = self.clone();
|
||||
tokio_current_thread::spawn(sleep(Duration::from_millis(500)).then(
|
||||
move |_| {
|
||||
s.0.reset();
|
||||
future::ok(())
|
||||
},
|
||||
));
|
||||
actix_rt::spawn(delay_for(Duration::from_millis(500)).then(move |_| {
|
||||
s.0.reset();
|
||||
future::ready(())
|
||||
}));
|
||||
}
|
||||
}
|
||||
|
||||
fn now(&self) -> Instant {
|
||||
self.check_date();
|
||||
self.0.get_ref().as_ref().unwrap().1
|
||||
self.0.current.get().unwrap().1
|
||||
}
|
||||
|
||||
fn date(&self) -> &Date {
|
||||
fn set_date<F: FnMut(&Date)>(&self, mut f: F) {
|
||||
self.check_date();
|
||||
|
||||
let item = self.0.get_ref().as_ref().unwrap();
|
||||
&item.0
|
||||
f(&self.0.current.get().unwrap().0);
|
||||
}
|
||||
}
|
||||
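The same caching idea in isolation: keep the last computed value in a `Cell`, recompute it lazily after it has been cleared, and let a timer clear it periodically (here the reset is left to the caller rather than spawned on a runtime, and the names are illustrative). A minimal, self-contained sketch:

    use std::cell::Cell;
    use std::time::Instant;

    // Lazily cached timestamp, mirroring DateServiceInner's Cell-based storage.
    struct CachedNow {
        current: Cell<Option<Instant>>,
    }

    impl CachedNow {
        fn new() -> Self {
            CachedNow { current: Cell::new(None) }
        }

        // Return the cached instant, computing it on first use after a reset.
        fn now(&self) -> Instant {
            if let Some(v) = self.current.get() {
                return v;
            }
            let v = Instant::now();
            self.current.set(Some(v));
            v
        }

        // Invalidate the cache; the next call to `now` recomputes it.
        fn reset(&self) {
            self.current.set(None);
        }
    }
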
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use actix_rt::System;
|
||||
use futures::future;
|
||||
|
||||
// Test modifying the date from within the closure
|
||||
// passed to `set_date`
|
||||
#[test]
|
||||
fn test_evil_date() {
|
||||
let service = DateService::new();
|
||||
// Make sure that `check_date` doesn't try to spawn a task
|
||||
service.0.update();
|
||||
service.set_date(|_| service.0.reset());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_date_len() {
|
||||
assert_eq!(DATE_VALUE_LENGTH, "Sun, 06 Nov 1994 08:49:37 GMT".len());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_date() {
|
||||
let mut rt = System::new("test");
|
||||
|
||||
let _ = rt.block_on(future::lazy(|| {
|
||||
let settings = ServiceConfig::new(KeepAlive::Os, 0, 0);
|
||||
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
|
||||
settings.set_date(&mut buf1);
|
||||
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
|
||||
settings.set_date(&mut buf2);
|
||||
assert_eq!(buf1, buf2);
|
||||
future::ok::<_, ()>(())
|
||||
}));
|
||||
#[actix_rt::test]
|
||||
async fn test_date() {
|
||||
let settings = ServiceConfig::new(KeepAlive::Os, 0, 0, false, None);
|
||||
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
|
||||
settings.set_date(&mut buf1);
|
||||
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
|
||||
settings.set_date(&mut buf2);
|
||||
assert_eq!(buf1, buf2);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,240 +0,0 @@
|
||||
use std::borrow::Cow;
|
||||
|
||||
use time::{Duration, Tm};
|
||||
|
||||
use super::{Cookie, SameSite};
|
||||
|
||||
/// Structure that follows the builder pattern for building `Cookie` structs.
|
||||
///
|
||||
/// To construct a cookie:
|
||||
///
|
||||
/// 1. Call [`Cookie::build`](struct.Cookie.html#method.build) to start building.
|
||||
/// 2. Use any of the builder methods to set fields in the cookie.
|
||||
/// 3. Call [finish](#method.finish) to retrieve the built cookie.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// use actix_http::cookie::Cookie;
|
||||
/// use time::Duration;
|
||||
///
|
||||
/// # fn main() {
|
||||
/// let cookie: Cookie = Cookie::build("name", "value")
|
||||
/// .domain("www.rust-lang.org")
|
||||
/// .path("/")
|
||||
/// .secure(true)
|
||||
/// .http_only(true)
|
||||
/// .max_age(Duration::days(1))
|
||||
/// .finish();
|
||||
/// # }
|
||||
/// ```
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CookieBuilder {
|
||||
/// The cookie being built.
|
||||
cookie: Cookie<'static>,
|
||||
}
|
||||
|
||||
impl CookieBuilder {
|
||||
/// Creates a new `CookieBuilder` instance from the given name and value.
|
||||
///
|
||||
/// This method is typically called indirectly via
|
||||
/// [Cookie::build](struct.Cookie.html#method.build).
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// use actix_http::cookie::Cookie;
|
||||
///
|
||||
/// let c = Cookie::build("foo", "bar").finish();
|
||||
/// assert_eq!(c.name_value(), ("foo", "bar"));
|
||||
/// ```
|
||||
pub fn new<N, V>(name: N, value: V) -> CookieBuilder
|
||||
where
|
||||
N: Into<Cow<'static, str>>,
|
||||
V: Into<Cow<'static, str>>,
|
||||
{
|
||||
CookieBuilder {
|
||||
cookie: Cookie::new(name, value),
|
||||
}
|
||||
}
|
||||
|
||||
/// Sets the `expires` field in the cookie being built.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// use actix_http::cookie::Cookie;
|
||||
///
|
||||
/// # fn main() {
|
||||
/// let c = Cookie::build("foo", "bar")
|
||||
/// .expires(time::now())
|
||||
/// .finish();
    ///
    /// assert!(c.expires().is_some());
    /// # }
    /// ```
    #[inline]
    pub fn expires(mut self, when: Tm) -> CookieBuilder {
        self.cookie.set_expires(when);
        self
    }

    /// Sets the `max_age` field in the cookie being built.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::Cookie;
    ///
    /// # fn main() {
    /// let c = Cookie::build("foo", "bar")
    ///     .max_age(time::Duration::minutes(30))
    ///     .finish();
    ///
    /// assert_eq!(c.max_age(), Some(time::Duration::seconds(30 * 60)));
    /// # }
    /// ```
    #[inline]
    pub fn max_age(mut self, value: Duration) -> CookieBuilder {
        self.cookie.set_max_age(value);
        self
    }

    /// Sets the `domain` field in the cookie being built.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::Cookie;
    ///
    /// let c = Cookie::build("foo", "bar")
    ///     .domain("www.rust-lang.org")
    ///     .finish();
    ///
    /// assert_eq!(c.domain(), Some("www.rust-lang.org"));
    /// ```
    pub fn domain<D: Into<Cow<'static, str>>>(mut self, value: D) -> CookieBuilder {
        self.cookie.set_domain(value);
        self
    }

    /// Sets the `path` field in the cookie being built.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::Cookie;
    ///
    /// let c = Cookie::build("foo", "bar")
    ///     .path("/")
    ///     .finish();
    ///
    /// assert_eq!(c.path(), Some("/"));
    /// ```
    pub fn path<P: Into<Cow<'static, str>>>(mut self, path: P) -> CookieBuilder {
        self.cookie.set_path(path);
        self
    }

    /// Sets the `secure` field in the cookie being built.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::Cookie;
    ///
    /// let c = Cookie::build("foo", "bar")
    ///     .secure(true)
    ///     .finish();
    ///
    /// assert_eq!(c.secure(), Some(true));
    /// ```
    #[inline]
    pub fn secure(mut self, value: bool) -> CookieBuilder {
        self.cookie.set_secure(value);
        self
    }

    /// Sets the `http_only` field in the cookie being built.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::Cookie;
    ///
    /// let c = Cookie::build("foo", "bar")
    ///     .http_only(true)
    ///     .finish();
    ///
    /// assert_eq!(c.http_only(), Some(true));
    /// ```
    #[inline]
    pub fn http_only(mut self, value: bool) -> CookieBuilder {
        self.cookie.set_http_only(value);
        self
    }

    /// Sets the `same_site` field in the cookie being built.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::{Cookie, SameSite};
    ///
    /// let c = Cookie::build("foo", "bar")
    ///     .same_site(SameSite::Strict)
    ///     .finish();
    ///
    /// assert_eq!(c.same_site(), Some(SameSite::Strict));
    /// ```
    #[inline]
    pub fn same_site(mut self, value: SameSite) -> CookieBuilder {
        self.cookie.set_same_site(value);
        self
    }

    /// Makes the cookie being built 'permanent' by extending its expiration and
    /// max age 20 years into the future.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::Cookie;
    /// use time::Duration;
    ///
    /// # fn main() {
    /// let c = Cookie::build("foo", "bar")
    ///     .permanent()
    ///     .finish();
    ///
    /// assert_eq!(c.max_age(), Some(Duration::days(365 * 20)));
    /// # assert!(c.expires().is_some());
    /// # }
    /// ```
    #[inline]
    pub fn permanent(mut self) -> CookieBuilder {
        self.cookie.make_permanent();
        self
    }

    /// Finishes building and returns the built `Cookie`.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::Cookie;
    ///
    /// let c = Cookie::build("foo", "bar")
    ///     .domain("crates.io")
    ///     .path("/")
    ///     .finish();
    ///
    /// assert_eq!(c.name_value(), ("foo", "bar"));
    /// assert_eq!(c.domain(), Some("crates.io"));
    /// assert_eq!(c.path(), Some("/"));
    /// ```
    #[inline]
    pub fn finish(self) -> Cookie<'static> {
        self.cookie
    }
}
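For orientation, a minimal sketch of how the builder methods above chain together into a single cookie. It is not part of the diff; the `"session"` name, `"example.com"` domain, and the external `time` crate dependency are illustrative assumptions only.

```rust
use actix_http::cookie::{Cookie, SameSite};

fn build_session_cookie(token: &str) -> Cookie<'static> {
    // Each setter consumes the builder and returns it, so a complete
    // cookie can be assembled in one chained expression ending in `finish()`.
    Cookie::build("session", token.to_owned())
        .domain("example.com") // hypothetical domain, for illustration
        .path("/")
        .secure(true)
        .http_only(true)
        .same_site(SameSite::Lax)
        .max_age(time::Duration::minutes(30))
        .finish()
}
```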
@@ -1,71 +0,0 @@
use std::borrow::Borrow;
use std::hash::{Hash, Hasher};
use std::ops::{Deref, DerefMut};

use super::Cookie;

/// A `DeltaCookie` is a helper structure used in a cookie jar. It wraps a
/// `Cookie` so that it can be hashed and compared purely by name. It further
/// records whether the wrapped cookie is a "removal" cookie, that is, a cookie
/// that when sent to the client removes the named cookie on the client's
/// machine.
#[derive(Clone, Debug)]
pub struct DeltaCookie {
    pub cookie: Cookie<'static>,
    pub removed: bool,
}

impl DeltaCookie {
    /// Create a new `DeltaCookie` that is being added to a jar.
    #[inline]
    pub fn added(cookie: Cookie<'static>) -> DeltaCookie {
        DeltaCookie {
            cookie,
            removed: false,
        }
    }

    /// Create a new `DeltaCookie` that is being removed from a jar. The
    /// `cookie` should be a "removal" cookie.
    #[inline]
    pub fn removed(cookie: Cookie<'static>) -> DeltaCookie {
        DeltaCookie {
            cookie,
            removed: true,
        }
    }
}

impl Deref for DeltaCookie {
    type Target = Cookie<'static>;

    fn deref(&self) -> &Cookie<'static> {
        &self.cookie
    }
}

impl DerefMut for DeltaCookie {
    fn deref_mut(&mut self) -> &mut Cookie<'static> {
        &mut self.cookie
    }
}

impl PartialEq for DeltaCookie {
    fn eq(&self, other: &DeltaCookie) -> bool {
        self.name() == other.name()
    }
}

impl Eq for DeltaCookie {}

impl Hash for DeltaCookie {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.name().hash(state);
    }
}

impl Borrow<str> for DeltaCookie {
    fn borrow(&self) -> &str {
        self.name()
    }
}
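The `PartialEq`, `Hash`, and `Borrow<str>` impls above are what let a `HashSet<DeltaCookie>` hold one entry per cookie name and be probed with a plain `&str`. A standalone sketch of that pattern, using a simplified stand-in type rather than the real `Cookie`:

```rust
use std::borrow::Borrow;
use std::collections::HashSet;
use std::hash::{Hash, Hasher};

// Simplified stand-in for `DeltaCookie`: hashed and compared by name only.
#[derive(Debug)]
struct NamedEntry {
    name: String,
    removed: bool,
}

impl PartialEq for NamedEntry {
    fn eq(&self, other: &Self) -> bool {
        self.name == other.name
    }
}
impl Eq for NamedEntry {}

impl Hash for NamedEntry {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.name.hash(state);
    }
}

impl Borrow<str> for NamedEntry {
    fn borrow(&self) -> &str {
        &self.name
    }
}

fn main() {
    let mut set = HashSet::new();
    set.insert(NamedEntry { name: "session".into(), removed: false });

    // Lookup by name alone works because of the `Borrow<str>` impl.
    assert!(set.get("session").is_some());

    // Replacing with a "removal" entry keeps a single entry per name,
    // mirroring how the jar swaps an added cookie for a removal cookie.
    set.replace(NamedEntry { name: "session".into(), removed: true });
    assert_eq!(set.get("session").map(|e| e.removed), Some(true));
}
```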
@@ -1,98 +0,0 @@
//! This module contains types that represent cookie properties that are not yet
//! standardized. That is, _draft_ features.

use std::fmt;

/// The `SameSite` cookie attribute.
///
/// A cookie with a `SameSite` attribute has restrictions imposed on when it is
/// sent to the origin server in a cross-site request. If the `SameSite`
/// attribute is "Strict", then the cookie is never sent in cross-site requests.
/// If the `SameSite` attribute is "Lax", the cookie is only sent in cross-site
/// requests with "safe" HTTP methods, i.e., `GET`, `HEAD`, `OPTIONS`, `TRACE`.
/// If the `SameSite` attribute is not present (made explicit via the
/// `SameSite::None` variant), then the cookie will be sent as normal.
///
/// **Note:** This cookie attribute is an HTTP draft! Its meaning and definition
/// are subject to change.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum SameSite {
    /// The "Strict" `SameSite` attribute.
    Strict,
    /// The "Lax" `SameSite` attribute.
    Lax,
    /// No `SameSite` attribute.
    None,
}

impl SameSite {
    /// Returns `true` if `self` is `SameSite::Strict` and `false` otherwise.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::SameSite;
    ///
    /// let strict = SameSite::Strict;
    /// assert!(strict.is_strict());
    /// assert!(!strict.is_lax());
    /// assert!(!strict.is_none());
    /// ```
    #[inline]
    pub fn is_strict(self) -> bool {
        match self {
            SameSite::Strict => true,
            SameSite::Lax | SameSite::None => false,
        }
    }

    /// Returns `true` if `self` is `SameSite::Lax` and `false` otherwise.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::SameSite;
    ///
    /// let lax = SameSite::Lax;
    /// assert!(lax.is_lax());
    /// assert!(!lax.is_strict());
    /// assert!(!lax.is_none());
    /// ```
    #[inline]
    pub fn is_lax(self) -> bool {
        match self {
            SameSite::Lax => true,
            SameSite::Strict | SameSite::None => false,
        }
    }

    /// Returns `true` if `self` is `SameSite::None` and `false` otherwise.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::SameSite;
    ///
    /// let none = SameSite::None;
    /// assert!(none.is_none());
    /// assert!(!none.is_lax());
    /// assert!(!none.is_strict());
    /// ```
    #[inline]
    pub fn is_none(self) -> bool {
        match self {
            SameSite::None => true,
            SameSite::Lax | SameSite::Strict => false,
        }
    }
}

impl fmt::Display for SameSite {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            SameSite::Strict => write!(f, "Strict"),
            SameSite::Lax => write!(f, "Lax"),
            SameSite::None => Ok(()),
        }
    }
}
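A small sketch of what the `Display` impl above implies for serialization, assuming only the code shown here: `Strict` and `Lax` render as their attribute values, while `SameSite::None` renders as an empty string, which is how the attribute ends up omitted from a serialized `Set-Cookie` header.

```rust
use actix_http::cookie::SameSite;

fn main() {
    // Draft attribute values render directly...
    assert_eq!(SameSite::Strict.to_string(), "Strict");
    assert_eq!(SameSite::Lax.to_string(), "Lax");

    // ...while `None` renders nothing at all (the attribute is simply absent).
    assert_eq!(SameSite::None.to_string(), "");
}
```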
@@ -1,655 +0,0 @@
use std::collections::HashSet;
use std::mem::replace;

use time::{self, Duration};

use super::delta::DeltaCookie;
use super::Cookie;

#[cfg(feature = "secure-cookies")]
use super::secure::{Key, PrivateJar, SignedJar};

/// A collection of cookies that tracks its modifications.
///
/// A `CookieJar` provides storage for any number of cookies. Any changes made
/// to the jar are tracked; the changes can be retrieved via the
/// [delta](#method.delta) method which returns an iterator over the changes.
///
/// # Usage
///
/// A jar's life begins via [new](#method.new) and calls to
/// [`add_original`](#method.add_original):
///
/// ```rust
/// use actix_http::cookie::{Cookie, CookieJar};
///
/// let mut jar = CookieJar::new();
/// jar.add_original(Cookie::new("name", "value"));
/// jar.add_original(Cookie::new("second", "another"));
/// ```
///
/// Cookies can be added via [add](#method.add) and removed via
/// [remove](#method.remove). Finally, cookies can be looked up via
/// [get](#method.get):
///
/// ```rust
/// # use actix_http::cookie::{Cookie, CookieJar};
/// let mut jar = CookieJar::new();
/// jar.add(Cookie::new("a", "one"));
/// jar.add(Cookie::new("b", "two"));
///
/// assert_eq!(jar.get("a").map(|c| c.value()), Some("one"));
/// assert_eq!(jar.get("b").map(|c| c.value()), Some("two"));
///
/// jar.remove(Cookie::named("b"));
/// assert!(jar.get("b").is_none());
/// ```
///
/// # Deltas
///
/// A jar keeps track of any modifications made to it over time. The
/// modifications are recorded as cookies. The modifications can be retrieved
/// via [delta](#method.delta). Any new `Cookie` added to a jar via `add`
/// results in the same `Cookie` appearing in the `delta`; cookies added via
/// `add_original` do not count towards the delta. Any _original_ cookie that is
/// removed from a jar results in a "removal" cookie appearing in the delta. A
/// "removal" cookie is a cookie that a server sends so that the cookie is
/// removed from the client's machine.
///
/// Deltas are typically used to create `Set-Cookie` headers corresponding to
/// the changes made to a cookie jar over a period of time.
///
/// ```rust
/// # use actix_http::cookie::{Cookie, CookieJar};
/// let mut jar = CookieJar::new();
///
/// // original cookies don't affect the delta
/// jar.add_original(Cookie::new("original", "value"));
/// assert_eq!(jar.delta().count(), 0);
///
/// // new cookies result in an equivalent `Cookie` in the delta
/// jar.add(Cookie::new("a", "one"));
/// jar.add(Cookie::new("b", "two"));
/// assert_eq!(jar.delta().count(), 2);
///
/// // removing an original cookie adds a "removal" cookie to the delta
/// jar.remove(Cookie::named("original"));
/// assert_eq!(jar.delta().count(), 3);
///
/// // removing a new cookie that was added removes that `Cookie` from the delta
/// jar.remove(Cookie::named("a"));
/// assert_eq!(jar.delta().count(), 2);
/// ```
#[derive(Default, Debug, Clone)]
pub struct CookieJar {
    original_cookies: HashSet<DeltaCookie>,
    delta_cookies: HashSet<DeltaCookie>,
}

impl CookieJar {
    /// Creates an empty cookie jar.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::CookieJar;
    ///
    /// let jar = CookieJar::new();
    /// assert_eq!(jar.iter().count(), 0);
    /// ```
    pub fn new() -> CookieJar {
        CookieJar::default()
    }

    /// Returns a reference to the `Cookie` inside this jar with the name
    /// `name`. If no such cookie exists, returns `None`.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::{CookieJar, Cookie};
    ///
    /// let mut jar = CookieJar::new();
    /// assert!(jar.get("name").is_none());
    ///
    /// jar.add(Cookie::new("name", "value"));
    /// assert_eq!(jar.get("name").map(|c| c.value()), Some("value"));
    /// ```
    pub fn get(&self, name: &str) -> Option<&Cookie<'static>> {
        self.delta_cookies
            .get(name)
            .or_else(|| self.original_cookies.get(name))
            .and_then(|c| if !c.removed { Some(&c.cookie) } else { None })
    }

    /// Adds an "original" `cookie` to this jar. If an original cookie with the
    /// same name already exists, it is replaced with `cookie`. Cookies added
    /// with `add` take precedence and are not replaced by this method.
    ///
    /// Adding an original cookie does not affect the [delta](#method.delta)
    /// computation. This method is intended to be used to seed the cookie jar
    /// with cookies received from a client's HTTP message.
    ///
    /// For accurate `delta` computations, this method should not be called
    /// after calling `remove`.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::{CookieJar, Cookie};
    ///
    /// let mut jar = CookieJar::new();
    /// jar.add_original(Cookie::new("name", "value"));
    /// jar.add_original(Cookie::new("second", "two"));
    ///
    /// assert_eq!(jar.get("name").map(|c| c.value()), Some("value"));
    /// assert_eq!(jar.get("second").map(|c| c.value()), Some("two"));
    /// assert_eq!(jar.iter().count(), 2);
    /// assert_eq!(jar.delta().count(), 0);
    /// ```
    pub fn add_original(&mut self, cookie: Cookie<'static>) {
        self.original_cookies.replace(DeltaCookie::added(cookie));
    }

    /// Adds `cookie` to this jar. If a cookie with the same name already
    /// exists, it is replaced with `cookie`.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::{CookieJar, Cookie};
    ///
    /// let mut jar = CookieJar::new();
    /// jar.add(Cookie::new("name", "value"));
    /// jar.add(Cookie::new("second", "two"));
    ///
    /// assert_eq!(jar.get("name").map(|c| c.value()), Some("value"));
    /// assert_eq!(jar.get("second").map(|c| c.value()), Some("two"));
    /// assert_eq!(jar.iter().count(), 2);
    /// assert_eq!(jar.delta().count(), 2);
    /// ```
    pub fn add(&mut self, cookie: Cookie<'static>) {
        self.delta_cookies.replace(DeltaCookie::added(cookie));
    }

    /// Removes `cookie` from this jar. If an _original_ cookie with the same
    /// name as `cookie` is present in the jar, a _removal_ cookie will be
    /// present in the `delta` computation. To properly generate the removal
    /// cookie, `cookie` must contain the same `path` and `domain` as the cookie
    /// that was initially set.
    ///
    /// A "removal" cookie is a cookie that has the same name as the original
    /// cookie but has an empty value, a max-age of 0, and an expiration date
    /// far in the past.
    ///
    /// # Example
    ///
    /// Removing an _original_ cookie results in a _removal_ cookie:
    ///
    /// ```rust
    /// use actix_http::cookie::{CookieJar, Cookie};
    /// use time::Duration;
    ///
    /// # fn main() {
    /// let mut jar = CookieJar::new();
    ///
    /// // Assume this cookie originally had a path of "/" and domain of "a.b".
    /// jar.add_original(Cookie::new("name", "value"));
    ///
    /// // If the path and domain were set, they must be provided to `remove`.
    /// jar.remove(Cookie::build("name", "").path("/").domain("a.b").finish());
    ///
    /// // The delta will contain the removal cookie.
    /// let delta: Vec<_> = jar.delta().collect();
    /// assert_eq!(delta.len(), 1);
    /// assert_eq!(delta[0].name(), "name");
    /// assert_eq!(delta[0].max_age(), Some(Duration::seconds(0)));
    /// # }
    /// ```
    ///
    /// Removing a new cookie does not result in a _removal_ cookie:
    ///
    /// ```rust
    /// use actix_http::cookie::{CookieJar, Cookie};
    ///
    /// let mut jar = CookieJar::new();
    /// jar.add(Cookie::new("name", "value"));
    /// assert_eq!(jar.delta().count(), 1);
    ///
    /// jar.remove(Cookie::named("name"));
    /// assert_eq!(jar.delta().count(), 0);
    /// ```
    pub fn remove(&mut self, mut cookie: Cookie<'static>) {
        if self.original_cookies.contains(cookie.name()) {
            cookie.set_value("");
            cookie.set_max_age(Duration::seconds(0));
            cookie.set_expires(time::now() - Duration::days(365));
            self.delta_cookies.replace(DeltaCookie::removed(cookie));
        } else {
            self.delta_cookies.remove(cookie.name());
        }
    }

    /// Removes `cookie` from this jar completely. This method differs from
    /// `remove` in that no delta cookie is created under any condition. Neither
    /// the `delta` nor `iter` methods will return a cookie that is removed
    /// using this method.
    ///
    /// # Example
    ///
    /// Removing an _original_ cookie; no _removal_ cookie is generated:
    ///
    /// ```rust
    /// use actix_http::cookie::{CookieJar, Cookie};
    /// use time::Duration;
    ///
    /// # fn main() {
    /// let mut jar = CookieJar::new();
    ///
    /// // Add an original cookie and a new cookie.
    /// jar.add_original(Cookie::new("name", "value"));
    /// jar.add(Cookie::new("key", "value"));
    /// assert_eq!(jar.delta().count(), 1);
    /// assert_eq!(jar.iter().count(), 2);
    ///
    /// // Now force remove the original cookie.
    /// jar.force_remove(Cookie::new("name", "value"));
    /// assert_eq!(jar.delta().count(), 1);
    /// assert_eq!(jar.iter().count(), 1);
    ///
    /// // Now force remove the new cookie.
    /// jar.force_remove(Cookie::new("key", "value"));
    /// assert_eq!(jar.delta().count(), 0);
    /// assert_eq!(jar.iter().count(), 0);
    /// # }
    /// ```
    pub fn force_remove<'a>(&mut self, cookie: Cookie<'a>) {
        self.original_cookies.remove(cookie.name());
        self.delta_cookies.remove(cookie.name());
    }

    /// Removes all cookies from this cookie jar.
    #[deprecated(
        since = "0.7.0",
        note = "calling this method may not remove \
                all cookies since the path and domain are not specified; use \
                `remove` instead"
    )]
    pub fn clear(&mut self) {
        self.delta_cookies.clear();
        for delta in replace(&mut self.original_cookies, HashSet::new()) {
            self.remove(delta.cookie);
        }
    }

    /// Returns an iterator over cookies that represent the changes to this jar
    /// over time. These cookies can be rendered directly as `Set-Cookie` header
    /// values to affect the changes made to this jar on the client.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::{CookieJar, Cookie};
    ///
    /// let mut jar = CookieJar::new();
    /// jar.add_original(Cookie::new("name", "value"));
    /// jar.add_original(Cookie::new("second", "two"));
    ///
    /// // Add new cookies.
    /// jar.add(Cookie::new("new", "third"));
    /// jar.add(Cookie::new("another", "fourth"));
    /// jar.add(Cookie::new("yac", "fifth"));
    ///
    /// // Remove some cookies.
    /// jar.remove(Cookie::named("name"));
    /// jar.remove(Cookie::named("another"));
    ///
    /// // Delta contains two new cookies ("new", "yac") and a removal ("name").
    /// assert_eq!(jar.delta().count(), 3);
    /// ```
    pub fn delta(&self) -> Delta {
        Delta {
            iter: self.delta_cookies.iter(),
        }
    }

    /// Returns an iterator over all of the cookies present in this jar.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::{CookieJar, Cookie};
    ///
    /// let mut jar = CookieJar::new();
    ///
    /// jar.add_original(Cookie::new("name", "value"));
    /// jar.add_original(Cookie::new("second", "two"));
    ///
    /// jar.add(Cookie::new("new", "third"));
    /// jar.add(Cookie::new("another", "fourth"));
    /// jar.add(Cookie::new("yac", "fifth"));
    ///
    /// jar.remove(Cookie::named("name"));
    /// jar.remove(Cookie::named("another"));
    ///
    /// // There are three cookies in the jar: "second", "new", and "yac".
    /// # assert_eq!(jar.iter().count(), 3);
    /// for cookie in jar.iter() {
    ///     match cookie.name() {
    ///         "second" => assert_eq!(cookie.value(), "two"),
    ///         "new" => assert_eq!(cookie.value(), "third"),
    ///         "yac" => assert_eq!(cookie.value(), "fifth"),
    ///         _ => unreachable!("there are only three cookies in the jar")
    ///     }
    /// }
    /// ```
    pub fn iter(&self) -> Iter {
        Iter {
            delta_cookies: self
                .delta_cookies
                .iter()
                .chain(self.original_cookies.difference(&self.delta_cookies)),
        }
    }

    /// Returns a `PrivateJar` with `self` as its parent jar using the key `key`
    /// to sign/encrypt and verify/decrypt cookies added/retrieved from the
    /// child jar.
    ///
    /// Any modifications to the child jar will be reflected on the parent jar,
    /// and any retrievals from the child jar will be made from the parent jar.
    ///
    /// This method is only available when the `secure` feature is enabled.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::{Cookie, CookieJar, Key};
    ///
    /// // Generate a secure key.
    /// let key = Key::generate();
    ///
    /// // Add a private (signed + encrypted) cookie.
    /// let mut jar = CookieJar::new();
    /// jar.private(&key).add(Cookie::new("private", "text"));
    ///
    /// // The cookie's contents are encrypted.
    /// assert_ne!(jar.get("private").unwrap().value(), "text");
    ///
    /// // They can be decrypted and verified through the child jar.
    /// assert_eq!(jar.private(&key).get("private").unwrap().value(), "text");
    ///
    /// // A tampered with cookie does not validate but still exists.
    /// let mut cookie = jar.get("private").unwrap().clone();
    /// jar.add(Cookie::new("private", cookie.value().to_string() + "!"));
    /// assert!(jar.private(&key).get("private").is_none());
    /// assert!(jar.get("private").is_some());
    /// ```
    #[cfg(feature = "secure-cookies")]
    pub fn private(&mut self, key: &Key) -> PrivateJar {
        PrivateJar::new(self, key)
    }

    /// Returns a `SignedJar` with `self` as its parent jar using the key `key`
    /// to sign/verify cookies added/retrieved from the child jar.
    ///
    /// Any modifications to the child jar will be reflected on the parent jar,
    /// and any retrievals from the child jar will be made from the parent jar.
    ///
    /// This method is only available when the `secure` feature is enabled.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::{Cookie, CookieJar, Key};
    ///
    /// // Generate a secure key.
    /// let key = Key::generate();
    ///
    /// // Add a signed cookie.
    /// let mut jar = CookieJar::new();
    /// jar.signed(&key).add(Cookie::new("signed", "text"));
    ///
    /// // The cookie's contents are signed but still in plaintext.
    /// assert_ne!(jar.get("signed").unwrap().value(), "text");
    /// assert!(jar.get("signed").unwrap().value().contains("text"));
    ///
    /// // They can be verified through the child jar.
    /// assert_eq!(jar.signed(&key).get("signed").unwrap().value(), "text");
    ///
    /// // A tampered with cookie does not validate but still exists.
    /// let mut cookie = jar.get("signed").unwrap().clone();
    /// jar.add(Cookie::new("signed", cookie.value().to_string() + "!"));
    /// assert!(jar.signed(&key).get("signed").is_none());
    /// assert!(jar.get("signed").is_some());
    /// ```
    #[cfg(feature = "secure-cookies")]
    pub fn signed(&mut self, key: &Key) -> SignedJar {
        SignedJar::new(self, key)
    }
}

use std::collections::hash_set::Iter as HashSetIter;

/// Iterator over the changes to a cookie jar.
pub struct Delta<'a> {
    iter: HashSetIter<'a, DeltaCookie>,
}

impl<'a> Iterator for Delta<'a> {
    type Item = &'a Cookie<'static>;

    fn next(&mut self) -> Option<&'a Cookie<'static>> {
        self.iter.next().map(|c| &c.cookie)
    }
}

use std::collections::hash_map::RandomState;
use std::collections::hash_set::Difference;
use std::iter::Chain;

/// Iterator over all of the cookies in a jar.
pub struct Iter<'a> {
    delta_cookies:
        Chain<HashSetIter<'a, DeltaCookie>, Difference<'a, DeltaCookie, RandomState>>,
}

impl<'a> Iterator for Iter<'a> {
    type Item = &'a Cookie<'static>;

    fn next(&mut self) -> Option<&'a Cookie<'static>> {
        for cookie in self.delta_cookies.by_ref() {
            if !cookie.removed {
                return Some(&*cookie);
            }
        }

        None
    }
}

#[cfg(test)]
mod test {
    #[cfg(feature = "secure-cookies")]
    use super::Key;
    use super::{Cookie, CookieJar};

    #[test]
    #[allow(deprecated)]
    fn simple() {
        let mut c = CookieJar::new();

        c.add(Cookie::new("test", ""));
        c.add(Cookie::new("test2", ""));
        c.remove(Cookie::named("test"));

        assert!(c.get("test").is_none());
        assert!(c.get("test2").is_some());

        c.add(Cookie::new("test3", ""));
        c.clear();

        assert!(c.get("test").is_none());
        assert!(c.get("test2").is_none());
        assert!(c.get("test3").is_none());
    }

    #[test]
    fn jar_is_send() {
        fn is_send<T: Send>(_: T) -> bool {
            true
        }

        assert!(is_send(CookieJar::new()))
    }

    #[test]
    #[cfg(feature = "secure-cookies")]
    fn iter() {
        let key = Key::generate();
        let mut c = CookieJar::new();

        c.add_original(Cookie::new("original", "original"));

        c.add(Cookie::new("test", "test"));
        c.add(Cookie::new("test2", "test2"));
        c.add(Cookie::new("test3", "test3"));
        assert_eq!(c.iter().count(), 4);

        c.signed(&key).add(Cookie::new("signed", "signed"));
        c.private(&key).add(Cookie::new("encrypted", "encrypted"));
        assert_eq!(c.iter().count(), 6);

        c.remove(Cookie::named("test"));
        assert_eq!(c.iter().count(), 5);

        c.remove(Cookie::named("signed"));
        c.remove(Cookie::named("test2"));
        assert_eq!(c.iter().count(), 3);

        c.add(Cookie::new("test2", "test2"));
        assert_eq!(c.iter().count(), 4);

        c.remove(Cookie::named("test2"));
        assert_eq!(c.iter().count(), 3);
    }

    #[test]
    #[cfg(feature = "secure-cookies")]
    fn delta() {
        use std::collections::HashMap;
        use time::Duration;

        let mut c = CookieJar::new();

        c.add_original(Cookie::new("original", "original"));
        c.add_original(Cookie::new("original1", "original1"));

        c.add(Cookie::new("test", "test"));
        c.add(Cookie::new("test2", "test2"));
        c.add(Cookie::new("test3", "test3"));
        c.add(Cookie::new("test4", "test4"));

        c.remove(Cookie::named("test"));
        c.remove(Cookie::named("original"));

        assert_eq!(c.delta().count(), 4);

        let names: HashMap<_, _> = c.delta().map(|c| (c.name(), c.max_age())).collect();

        assert!(names.get("test2").unwrap().is_none());
        assert!(names.get("test3").unwrap().is_none());
        assert!(names.get("test4").unwrap().is_none());
        assert_eq!(names.get("original").unwrap(), &Some(Duration::seconds(0)));
    }

    #[test]
    fn replace_original() {
        let mut jar = CookieJar::new();
        jar.add_original(Cookie::new("original_a", "a"));
        jar.add_original(Cookie::new("original_b", "b"));
        assert_eq!(jar.get("original_a").unwrap().value(), "a");

        jar.add(Cookie::new("original_a", "av2"));
        assert_eq!(jar.get("original_a").unwrap().value(), "av2");
    }

    #[test]
    fn empty_delta() {
        let mut jar = CookieJar::new();
        jar.add(Cookie::new("name", "val"));
        assert_eq!(jar.delta().count(), 1);

        jar.remove(Cookie::named("name"));
        assert_eq!(jar.delta().count(), 0);

        jar.add_original(Cookie::new("name", "val"));
        assert_eq!(jar.delta().count(), 0);

        jar.remove(Cookie::named("name"));
        assert_eq!(jar.delta().count(), 1);

        jar.add(Cookie::new("name", "val"));
        assert_eq!(jar.delta().count(), 1);

        jar.remove(Cookie::named("name"));
        assert_eq!(jar.delta().count(), 1);
    }

    #[test]
    fn add_remove_add() {
        let mut jar = CookieJar::new();
        jar.add_original(Cookie::new("name", "val"));
        assert_eq!(jar.delta().count(), 0);

        jar.remove(Cookie::named("name"));
        assert_eq!(jar.delta().filter(|c| c.value().is_empty()).count(), 1);
        assert_eq!(jar.delta().count(), 1);

        // The cookie's been deleted. Another original doesn't change that.
        jar.add_original(Cookie::new("name", "val"));
        assert_eq!(jar.delta().filter(|c| c.value().is_empty()).count(), 1);
        assert_eq!(jar.delta().count(), 1);

        jar.remove(Cookie::named("name"));
        assert_eq!(jar.delta().filter(|c| c.value().is_empty()).count(), 1);
        assert_eq!(jar.delta().count(), 1);

        jar.add(Cookie::new("name", "val"));
        assert_eq!(jar.delta().filter(|c| !c.value().is_empty()).count(), 1);
        assert_eq!(jar.delta().count(), 1);

        jar.remove(Cookie::named("name"));
        assert_eq!(jar.delta().filter(|c| c.value().is_empty()).count(), 1);
        assert_eq!(jar.delta().count(), 1);
    }

    #[test]
    fn replace_remove() {
        let mut jar = CookieJar::new();
        jar.add_original(Cookie::new("name", "val"));
        assert_eq!(jar.delta().count(), 0);

        jar.add(Cookie::new("name", "val"));
        assert_eq!(jar.delta().count(), 1);
        assert_eq!(jar.delta().filter(|c| !c.value().is_empty()).count(), 1);

        jar.remove(Cookie::named("name"));
        assert_eq!(jar.delta().filter(|c| c.value().is_empty()).count(), 1);
    }

    #[test]
    fn remove_with_path() {
        let mut jar = CookieJar::new();
        jar.add_original(Cookie::build("name", "val").finish());
        assert_eq!(jar.iter().count(), 1);
        assert_eq!(jar.delta().count(), 0);
        assert_eq!(jar.iter().filter(|c| c.path().is_none()).count(), 1);

        jar.remove(Cookie::build("name", "").path("/").finish());
        assert_eq!(jar.iter().count(), 0);
        assert_eq!(jar.delta().count(), 1);
        assert_eq!(jar.delta().filter(|c| c.value().is_empty()).count(), 1);
        assert_eq!(jar.delta().filter(|c| c.path() == Some("/")).count(), 1);
    }
}
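The doc comments above note that the delta is typically turned into `Set-Cookie` headers. A brief sketch of that step, assuming `Cookie`'s `Display` impl (which renders a cookie in `Set-Cookie` form) and illustrative cookie names:

```rust
use actix_http::cookie::{Cookie, CookieJar};

fn main() {
    let mut jar = CookieJar::new();

    // Seed with what the client sent, then record the server's changes.
    jar.add_original(Cookie::new("lang", "en"));
    jar.add(Cookie::new("session", "abc123"));
    jar.remove(Cookie::named("lang"));

    // The delta now holds the added "session" cookie plus a removal
    // cookie for "lang"; each stringifies to a `Set-Cookie` header value.
    for cookie in jar.delta() {
        println!("Set-Cookie: {}", cookie);
    }
}
```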
File diff suppressed because it is too large
@@ -1,426 +0,0 @@
use std::borrow::Cow;
use std::cmp;
use std::convert::From;
use std::error::Error;
use std::fmt;
use std::str::Utf8Error;

use percent_encoding::percent_decode;
use time::{self, Duration};

use super::{Cookie, CookieStr, SameSite};

/// Enum corresponding to a parsing error.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum ParseError {
    /// The cookie did not contain a name/value pair.
    MissingPair,
    /// The cookie's name was empty.
    EmptyName,
    /// Decoding the cookie's name or value resulted in invalid UTF-8.
    Utf8Error(Utf8Error),
    /// It is discouraged to exhaustively match on this enum as its variants may
    /// grow without a breaking-change bump in version numbers.
    #[doc(hidden)]
    __Nonexhasutive,
}

impl ParseError {
    /// Returns a description of this error as a string.
    pub fn as_str(&self) -> &'static str {
        match *self {
            ParseError::MissingPair => "the cookie is missing a name/value pair",
            ParseError::EmptyName => "the cookie's name is empty",
            ParseError::Utf8Error(_) => {
                "decoding the cookie's name or value resulted in invalid UTF-8"
            }
            ParseError::__Nonexhasutive => unreachable!("__Nonexhasutive ParseError"),
        }
    }
}

impl fmt::Display for ParseError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.as_str())
    }
}

impl From<Utf8Error> for ParseError {
    fn from(error: Utf8Error) -> ParseError {
        ParseError::Utf8Error(error)
    }
}

impl Error for ParseError {
    fn description(&self) -> &str {
        self.as_str()
    }
}

fn indexes_of(needle: &str, haystack: &str) -> Option<(usize, usize)> {
    let haystack_start = haystack.as_ptr() as usize;
    let needle_start = needle.as_ptr() as usize;

    if needle_start < haystack_start {
        return None;
    }

    if (needle_start + needle.len()) > (haystack_start + haystack.len()) {
        return None;
    }

    let start = needle_start - haystack_start;
    let end = start + needle.len();
    Some((start, end))
}

fn name_val_decoded(
    name: &str,
    val: &str,
) -> Result<(CookieStr, CookieStr), ParseError> {
    let decoded_name = percent_decode(name.as_bytes()).decode_utf8()?;
    let decoded_value = percent_decode(val.as_bytes()).decode_utf8()?;
    let name = CookieStr::Concrete(Cow::Owned(decoded_name.into_owned()));
    let val = CookieStr::Concrete(Cow::Owned(decoded_value.into_owned()));

    Ok((name, val))
}

// This function does the real parsing but _does not_ set the `cookie_string` in
// the returned cookie object. This only exists so that the borrow to `s` is
// returned at the end of the call, allowing the `cookie_string` field to be
// set in the outer `parse` function.
fn parse_inner<'c>(s: &str, decode: bool) -> Result<Cookie<'c>, ParseError> {
    let mut attributes = s.split(';');
    let key_value = match attributes.next() {
        Some(s) => s,
        _ => panic!(),
    };

    // Determine the name = val.
    let (name, value) = match key_value.find('=') {
        Some(i) => (key_value[..i].trim(), key_value[(i + 1)..].trim()),
        None => return Err(ParseError::MissingPair),
    };

    if name.is_empty() {
        return Err(ParseError::EmptyName);
    }

    // Create a cookie with all of the defaults. We'll fill things in while we
    // iterate through the parameters below.
    let (name, value) = if decode {
        name_val_decoded(name, value)?
    } else {
        let name_indexes = indexes_of(name, s).expect("name sub");
        let value_indexes = indexes_of(value, s).expect("value sub");
        let name = CookieStr::Indexed(name_indexes.0, name_indexes.1);
        let value = CookieStr::Indexed(value_indexes.0, value_indexes.1);

        (name, value)
    };

    let mut cookie = Cookie {
        name,
        value,
        cookie_string: None,
        expires: None,
        max_age: None,
        domain: None,
        path: None,
        secure: None,
        http_only: None,
        same_site: None,
    };

    for attr in attributes {
        let (key, value) = match attr.find('=') {
            Some(i) => (attr[..i].trim(), Some(attr[(i + 1)..].trim())),
            None => (attr.trim(), None),
        };

        match (&*key.to_ascii_lowercase(), value) {
            ("secure", _) => cookie.secure = Some(true),
            ("httponly", _) => cookie.http_only = Some(true),
            ("max-age", Some(v)) => {
                // See RFC 6265 Section 5.2.2, negative values indicate that the
                // earliest possible expiration time should be used, so set the
                // max age as 0 seconds.
                cookie.max_age = match v.parse() {
                    Ok(val) if val <= 0 => Some(Duration::zero()),
                    Ok(val) => {
                        // Don't panic if the max age seconds is greater than what's supported by
                        // `Duration`.
                        let val = cmp::min(val, Duration::max_value().num_seconds());
                        Some(Duration::seconds(val))
                    }
                    Err(_) => continue,
                };
            }
            ("domain", Some(mut domain)) if !domain.is_empty() => {
                if domain.starts_with('.') {
                    domain = &domain[1..];
                }

                let (i, j) = indexes_of(domain, s).expect("domain sub");
                cookie.domain = Some(CookieStr::Indexed(i, j));
            }
            ("path", Some(v)) => {
                let (i, j) = indexes_of(v, s).expect("path sub");
                cookie.path = Some(CookieStr::Indexed(i, j));
            }
            ("samesite", Some(v)) => {
                if v.eq_ignore_ascii_case("strict") {
                    cookie.same_site = Some(SameSite::Strict);
                } else if v.eq_ignore_ascii_case("lax") {
                    cookie.same_site = Some(SameSite::Lax);
                } else {
                    // We do nothing here, for now. When/if the `SameSite`
                    // attribute becomes standard, the spec says that we should
                    // ignore this cookie, i.e., fail to parse it, when an
                    // invalid value is passed in. The draft is at
                    // http://httpwg.org/http-extensions/draft-ietf-httpbis-cookie-same-site.html.
                }
            }
            ("expires", Some(v)) => {
                // Try strptime with three date formats according to
                // http://tools.ietf.org/html/rfc2616#section-3.3.1. Try
                // additional ones as encountered in the real world.
                let tm = time::strptime(v, "%a, %d %b %Y %H:%M:%S %Z")
                    .or_else(|_| time::strptime(v, "%A, %d-%b-%y %H:%M:%S %Z"))
                    .or_else(|_| time::strptime(v, "%a, %d-%b-%Y %H:%M:%S %Z"))
                    .or_else(|_| time::strptime(v, "%a %b %d %H:%M:%S %Y"));

                if let Ok(time) = tm {
                    cookie.expires = Some(time)
                }
            }
            _ => {
                // We're going to be permissive here. If we have no idea what
                // this is, then it's something nonstandard. We're not going to
                // store it (because it's not compliant), but we're also not
                // going to emit an error.
            }
        }
    }

    Ok(cookie)
}

pub fn parse_cookie<'c, S>(cow: S, decode: bool) -> Result<Cookie<'c>, ParseError>
where
    S: Into<Cow<'c, str>>,
{
    let s = cow.into();
    let mut cookie = parse_inner(&s, decode)?;
    cookie.cookie_string = Some(s);
    Ok(cookie)
}

#[cfg(test)]
mod tests {
    use super::{Cookie, SameSite};
    use time::{strptime, Duration};

    macro_rules! assert_eq_parse {
        ($string:expr, $expected:expr) => {
            let cookie = match Cookie::parse($string) {
                Ok(cookie) => cookie,
                Err(e) => panic!("Failed to parse {:?}: {:?}", $string, e),
            };

            assert_eq!(cookie, $expected);
        };
    }

    macro_rules! assert_ne_parse {
        ($string:expr, $expected:expr) => {
            let cookie = match Cookie::parse($string) {
                Ok(cookie) => cookie,
                Err(e) => panic!("Failed to parse {:?}: {:?}", $string, e),
            };

            assert_ne!(cookie, $expected);
        };
    }

    #[test]
    fn parse_same_site() {
        let expected = Cookie::build("foo", "bar")
            .same_site(SameSite::Lax)
            .finish();

        assert_eq_parse!("foo=bar; SameSite=Lax", expected);
        assert_eq_parse!("foo=bar; SameSite=lax", expected);
        assert_eq_parse!("foo=bar; SameSite=LAX", expected);
        assert_eq_parse!("foo=bar; samesite=Lax", expected);
        assert_eq_parse!("foo=bar; SAMESITE=Lax", expected);

        let expected = Cookie::build("foo", "bar")
            .same_site(SameSite::Strict)
            .finish();

        assert_eq_parse!("foo=bar; SameSite=Strict", expected);
        assert_eq_parse!("foo=bar; SameSITE=Strict", expected);
        assert_eq_parse!("foo=bar; SameSite=strict", expected);
        assert_eq_parse!("foo=bar; SameSite=STrICT", expected);
        assert_eq_parse!("foo=bar; SameSite=STRICT", expected);
    }

    #[test]
    fn parse() {
        assert!(Cookie::parse("bar").is_err());
        assert!(Cookie::parse("=bar").is_err());
        assert!(Cookie::parse(" =bar").is_err());
        assert!(Cookie::parse("foo=").is_ok());

        let expected = Cookie::build("foo", "bar=baz").finish();
        assert_eq_parse!("foo=bar=baz", expected);

        let mut expected = Cookie::build("foo", "bar").finish();
        assert_eq_parse!("foo=bar", expected);
        assert_eq_parse!("foo = bar", expected);
        assert_eq_parse!(" foo=bar ", expected);
        assert_eq_parse!(" foo=bar ;Domain=", expected);
        assert_eq_parse!(" foo=bar ;Domain= ", expected);
        assert_eq_parse!(" foo=bar ;Ignored", expected);

        let mut unexpected = Cookie::build("foo", "bar").http_only(false).finish();
        assert_ne_parse!(" foo=bar ;HttpOnly", unexpected);
        assert_ne_parse!(" foo=bar; httponly", unexpected);

        expected.set_http_only(true);
        assert_eq_parse!(" foo=bar ;HttpOnly", expected);
        assert_eq_parse!(" foo=bar ;httponly", expected);
        assert_eq_parse!(" foo=bar ;HTTPONLY=whatever", expected);
        assert_eq_parse!(" foo=bar ; sekure; HTTPONLY", expected);

        expected.set_secure(true);
        assert_eq_parse!(" foo=bar ;HttpOnly; Secure", expected);
        assert_eq_parse!(" foo=bar ;HttpOnly; Secure=aaaa", expected);

        unexpected.set_http_only(true);
        unexpected.set_secure(true);
        assert_ne_parse!(" foo=bar ;HttpOnly; skeure", unexpected);
        assert_ne_parse!(" foo=bar ;HttpOnly; =secure", unexpected);
        assert_ne_parse!(" foo=bar ;HttpOnly;", unexpected);

        unexpected.set_secure(false);
        assert_ne_parse!(" foo=bar ;HttpOnly; secure", unexpected);
        assert_ne_parse!(" foo=bar ;HttpOnly; secure", unexpected);
        assert_ne_parse!(" foo=bar ;HttpOnly; secure", unexpected);

        expected.set_max_age(Duration::zero());
        assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age=0", expected);
        assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age = 0 ", expected);
        assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age=-1", expected);
        assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age = -1 ", expected);

        expected.set_max_age(Duration::minutes(1));
        assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age=60", expected);
        assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age = 60 ", expected);

        expected.set_max_age(Duration::seconds(4));
        assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age=4", expected);
        assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age = 4 ", expected);

        unexpected.set_secure(true);
        unexpected.set_max_age(Duration::minutes(1));
        assert_ne_parse!(" foo=bar ;HttpOnly; Secure; Max-Age=122", unexpected);
        assert_ne_parse!(" foo=bar ;HttpOnly; Secure; Max-Age = 38 ", unexpected);
        assert_ne_parse!(" foo=bar ;HttpOnly; Secure; Max-Age=51", unexpected);
        assert_ne_parse!(" foo=bar ;HttpOnly; Secure; Max-Age = -1 ", unexpected);
        assert_ne_parse!(" foo=bar ;HttpOnly; Secure; Max-Age = 0", unexpected);

        expected.set_path("/");
        assert_eq_parse!("foo=bar;HttpOnly; Secure; Max-Age=4; Path=/", expected);
        assert_eq_parse!("foo=bar;HttpOnly; Secure; Max-Age=4;Path=/", expected);

        expected.set_path("/foo");
        assert_eq_parse!("foo=bar;HttpOnly; Secure; Max-Age=4; Path=/foo", expected);
        assert_eq_parse!("foo=bar;HttpOnly; Secure; Max-Age=4;Path=/foo", expected);
        assert_eq_parse!("foo=bar;HttpOnly; Secure; Max-Age=4;path=/foo", expected);
        assert_eq_parse!("foo=bar;HttpOnly; Secure; Max-Age=4;path = /foo", expected);

        unexpected.set_max_age(Duration::seconds(4));
        unexpected.set_path("/bar");
        assert_ne_parse!("foo=bar;HttpOnly; Secure; Max-Age=4; Path=/foo", unexpected);
        assert_ne_parse!("foo=bar;HttpOnly; Secure; Max-Age=4;Path=/baz", unexpected);

        expected.set_domain("www.foo.com");
        assert_eq_parse!(
            " foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
             Domain=www.foo.com",
            expected
        );

        expected.set_domain("foo.com");
        assert_eq_parse!(
            " foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
             Domain=foo.com",
            expected
        );
        assert_eq_parse!(
            " foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
             Domain=FOO.COM",
            expected
        );

        unexpected.set_path("/foo");
        unexpected.set_domain("bar.com");
        assert_ne_parse!(
            " foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
             Domain=foo.com",
            unexpected
        );
        assert_ne_parse!(
            " foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
             Domain=FOO.COM",
            unexpected
        );

        let time_str = "Wed, 21 Oct 2015 07:28:00 GMT";
        let expires = strptime(time_str, "%a, %d %b %Y %H:%M:%S %Z").unwrap();
        expected.set_expires(expires);
        assert_eq_parse!(
            " foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
             Domain=foo.com; Expires=Wed, 21 Oct 2015 07:28:00 GMT",
            expected
        );

        unexpected.set_domain("foo.com");
        let bad_expires = strptime(time_str, "%a, %d %b %Y %H:%S:%M %Z").unwrap();
        expected.set_expires(bad_expires);
        assert_ne_parse!(
            " foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
             Domain=foo.com; Expires=Wed, 21 Oct 2015 07:28:00 GMT",
            unexpected
        );
    }

    #[test]
    fn odd_characters() {
        let expected = Cookie::new("foo", "b%2Fr");
        assert_eq_parse!("foo=b%2Fr", expected);
    }

    #[test]
    fn odd_characters_encoded() {
        let expected = Cookie::new("foo", "b/r");
        let cookie = match Cookie::parse_encoded("foo=b%2Fr") {
            Ok(cookie) => cookie,
            Err(e) => panic!("Failed to parse: {:?}", e),
        };

        assert_eq!(cookie, expected);
    }

    #[test]
    fn do_not_panic_on_large_max_ages() {
        let max_seconds = Duration::max_value().num_seconds();
        let expected = Cookie::build("foo", "bar")
            .max_age(Duration::seconds(max_seconds))
            .finish();
        assert_eq_parse!(format!(" foo=bar; Max-Age={:?}", max_seconds + 1), expected);
    }
}
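As the tests above suggest, the attribute parsing in `parse_inner` is reached through `Cookie::parse` and `Cookie::parse_encoded`. A short sketch of that entry point, with an illustrative cookie string and assuming the `time` crate for the `Duration` comparison:

```rust
use actix_http::cookie::{Cookie, SameSite};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // `Cookie::parse` keeps the value verbatim; `parse_encoded` additionally
    // percent-decodes the name and value (see `name_val_decoded` above).
    let c = Cookie::parse(
        "id=a3fWa; Max-Age=2592000; Path=/; Secure; HttpOnly; SameSite=Lax",
    )?;

    assert_eq!(c.name(), "id");
    assert_eq!(c.value(), "a3fWa");
    assert_eq!(c.path(), Some("/"));
    assert_eq!(c.secure(), Some(true));
    assert_eq!(c.http_only(), Some(true));
    assert_eq!(c.same_site(), Some(SameSite::Lax));
    assert_eq!(c.max_age(), Some(time::Duration::seconds(2_592_000)));

    let decoded = Cookie::parse_encoded("foo=b%2Fr")?;
    assert_eq!(decoded.value(), "b/r");
    Ok(())
}
```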
@@ -1,180 +0,0 @@
use ring::digest::{Algorithm, SHA256};
use ring::hkdf::expand;
use ring::hmac::SigningKey;
use ring::rand::{SecureRandom, SystemRandom};

use super::private::KEY_LEN as PRIVATE_KEY_LEN;
use super::signed::KEY_LEN as SIGNED_KEY_LEN;

static HKDF_DIGEST: &'static Algorithm = &SHA256;
const KEYS_INFO: &'static str = "COOKIE;SIGNED:HMAC-SHA256;PRIVATE:AEAD-AES-256-GCM";

/// A cryptographic master key for use with `Signed` and/or `Private` jars.
///
/// This structure encapsulates secure, cryptographic keys for use with both
/// [PrivateJar](struct.PrivateJar.html) and [SignedJar](struct.SignedJar.html).
/// It can be derived from a single master key via
/// [from_master](#method.from_master) or generated from a secure random source
/// via [generate](#method.generate). A single instance of `Key` can be used for
/// both a `PrivateJar` and a `SignedJar`.
///
/// This type is only available when the `secure` feature is enabled.
#[derive(Clone)]
pub struct Key {
    signing_key: [u8; SIGNED_KEY_LEN],
    encryption_key: [u8; PRIVATE_KEY_LEN],
}

impl Key {
    /// Derives new signing/encryption keys from a master key.
    ///
    /// The master key must be at least 256-bits (32 bytes). For security, the
    /// master key _must_ be cryptographically random. The keys are derived
    /// deterministically from the master key.
    ///
    /// # Panics
    ///
    /// Panics if `key` is less than 32 bytes in length.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::Key;
    ///
    /// # /*
    /// let master_key = { /* a cryptographically random key >= 32 bytes */ };
    /// # */
    /// # let master_key: &Vec<u8> = &(0..32).collect();
    ///
    /// let key = Key::from_master(master_key);
    /// ```
    pub fn from_master(key: &[u8]) -> Key {
        if key.len() < 32 {
            panic!(
                "bad master key length: expected at least 32 bytes, found {}",
                key.len()
            );
        }

        // Expand the user's key into two.
        let prk = SigningKey::new(HKDF_DIGEST, key);
        let mut both_keys = [0; SIGNED_KEY_LEN + PRIVATE_KEY_LEN];
        expand(&prk, KEYS_INFO.as_bytes(), &mut both_keys);

        // Copy the keys into their respective arrays.
        let mut signing_key = [0; SIGNED_KEY_LEN];
        let mut encryption_key = [0; PRIVATE_KEY_LEN];
        signing_key.copy_from_slice(&both_keys[..SIGNED_KEY_LEN]);
        encryption_key.copy_from_slice(&both_keys[SIGNED_KEY_LEN..]);

        Key {
            signing_key,
            encryption_key,
        }
    }

    /// Generates signing/encryption keys from a secure, random source. Keys are
    /// generated nondeterministically.
    ///
    /// # Panics
    ///
    /// Panics if randomness cannot be retrieved from the operating system. See
    /// [try_generate](#method.try_generate) for a non-panicking version.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::Key;
    ///
    /// let key = Key::generate();
    /// ```
    pub fn generate() -> Key {
        Self::try_generate().expect("failed to generate `Key` from randomness")
    }

    /// Attempts to generate signing/encryption keys from a secure, random
    /// source. Keys are generated nondeterministically. If randomness cannot be
    /// retrieved from the underlying operating system, returns `None`.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::Key;
    ///
    /// let key = Key::try_generate();
    /// ```
    pub fn try_generate() -> Option<Key> {
        let mut sign_key = [0; SIGNED_KEY_LEN];
        let mut enc_key = [0; PRIVATE_KEY_LEN];

        let rng = SystemRandom::new();
        if rng.fill(&mut sign_key).is_err() || rng.fill(&mut enc_key).is_err() {
            return None;
        }

        Some(Key {
            signing_key: sign_key,
            encryption_key: enc_key,
        })
    }

    /// Returns the raw bytes of a key suitable for signing cookies.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::Key;
    ///
    /// let key = Key::generate();
    /// let signing_key = key.signing();
    /// ```
    pub fn signing(&self) -> &[u8] {
        &self.signing_key[..]
    }

    /// Returns the raw bytes of a key suitable for encrypting cookies.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::Key;
    ///
    /// let key = Key::generate();
    /// let encryption_key = key.encryption();
    /// ```
    pub fn encryption(&self) -> &[u8] {
        &self.encryption_key[..]
    }
}

#[cfg(test)]
mod test {
    use super::Key;

    #[test]
    fn deterministic_from_master() {
        let master_key: Vec<u8> = (0..32).collect();

        let key_a = Key::from_master(&master_key);
        let key_b = Key::from_master(&master_key);

        assert_eq!(key_a.signing(), key_b.signing());
        assert_eq!(key_a.encryption(), key_b.encryption());
        assert_ne!(key_a.encryption(), key_a.signing());

        let master_key_2: Vec<u8> = (32..64).collect();
        let key_2 = Key::from_master(&master_key_2);

        assert_ne!(key_2.signing(), key_a.signing());
        assert_ne!(key_2.encryption(), key_a.encryption());
    }

    #[test]
    fn non_deterministic_generate() {
        let key_a = Key::generate();
        let key_b = Key::generate();

        assert_ne!(key_a.signing(), key_b.signing());
        assert_ne!(key_a.encryption(), key_b.encryption());
    }
}
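Because `from_master` derives the signing and encryption keys deterministically, the same configured secret can be reused across restarts for both child jars. A minimal sketch under the assumptions that the `secure-cookies` feature is enabled and that the fixed 64-byte array stands in for a real configured secret:

```rust
use actix_http::cookie::{Cookie, CookieJar, Key};

fn main() {
    // Illustrative only: a real deployment would load a random secret
    // from configuration rather than hard-coding one.
    let master = [42u8; 64];
    let key = Key::from_master(&master);

    let mut jar = CookieJar::new();
    jar.signed(&key).add(Cookie::new("flash", "saved"));
    jar.private(&key).add(Cookie::new("user_id", "12345"));

    // The same `Key` verifies the signed cookie and decrypts the private one.
    assert_eq!(jar.signed(&key).get("flash").unwrap().value(), "saved");
    assert_eq!(jar.private(&key).get("user_id").unwrap().value(), "12345");
}
```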
@@ -1,40 +0,0 @@
#[cfg(test)]
macro_rules! assert_simple_behaviour {
    ($clear:expr, $secure:expr) => {{
        assert_eq!($clear.iter().count(), 0);

        $secure.add(Cookie::new("name", "val"));
        assert_eq!($clear.iter().count(), 1);
        assert_eq!($secure.get("name").unwrap().value(), "val");
        assert_ne!($clear.get("name").unwrap().value(), "val");

        $secure.add(Cookie::new("another", "two"));
        assert_eq!($clear.iter().count(), 2);

        $clear.remove(Cookie::named("another"));
        assert_eq!($clear.iter().count(), 1);

        $secure.remove(Cookie::named("name"));
        assert_eq!($clear.iter().count(), 0);
    }};
}

#[cfg(test)]
macro_rules! assert_secure_behaviour {
    ($clear:expr, $secure:expr) => {{
        $secure.add(Cookie::new("secure", "secure"));
        assert!($clear.get("secure").unwrap().value() != "secure");
        assert!($secure.get("secure").unwrap().value() == "secure");

        let mut cookie = $clear.get("secure").unwrap().clone();
        let new_val = format!("{}l", cookie.value());
        cookie.set_value(new_val);
        $clear.add(cookie);
        assert!($secure.get("secure").is_none());

        let mut cookie = $clear.get("secure").unwrap().clone();
        cookie.set_value("foobar");
        $clear.add(cookie);
        assert!($secure.get("secure").is_none());
    }};
}
@@ -1,10 +0,0 @@
//! Fork of https://github.com/alexcrichton/cookie-rs
#[macro_use]
mod macros;
mod key;
mod private;
mod signed;

pub use self::key::*;
pub use self::private::*;
pub use self::signed::*;
@@ -1,269 +0,0 @@
use std::str;

use log::warn;
use ring::aead::{open_in_place, seal_in_place, Aad, Algorithm, Nonce, AES_256_GCM};
use ring::aead::{OpeningKey, SealingKey};
use ring::rand::{SecureRandom, SystemRandom};

use super::Key;
use crate::cookie::{Cookie, CookieJar};

// Keep these in sync, and keep the key len synced with the `private` docs as
// well as the `KEYS_INFO` const in secure::Key.
static ALGO: &'static Algorithm = &AES_256_GCM;
const NONCE_LEN: usize = 12;
pub const KEY_LEN: usize = 32;

/// A child cookie jar that provides authenticated encryption for its cookies.
///
/// A _private_ child jar signs and encrypts all the cookies added to it and
/// verifies and decrypts cookies retrieved from it. Any cookies stored in a
/// `PrivateJar` are simultaneously assured confidentiality, integrity, and
/// authenticity. In other words, clients cannot discover nor tamper with the
/// contents of a cookie, nor can they fabricate cookie data.
///
/// This type is only available when the `secure` feature is enabled.
pub struct PrivateJar<'a> {
    parent: &'a mut CookieJar,
    key: [u8; KEY_LEN],
}

impl<'a> PrivateJar<'a> {
    /// Creates a new child `PrivateJar` with parent `parent` and key `key`.
    /// This method is typically called indirectly via the `private` method of
    /// `CookieJar`.
    #[doc(hidden)]
    pub fn new(parent: &'a mut CookieJar, key: &Key) -> PrivateJar<'a> {
        let mut key_array = [0u8; KEY_LEN];
        key_array.copy_from_slice(key.encryption());
        PrivateJar {
            parent,
            key: key_array,
        }
    }

    /// Given a sealed value `str` and a key name `name`, where the nonce is
    /// prepended to the original value and then both are Base64 encoded,
    /// verifies and decrypts the sealed value and returns it. If there's a
    /// problem, returns an `Err` with a string describing the issue.
    fn unseal(&self, name: &str, value: &str) -> Result<String, &'static str> {
        let mut data = base64::decode(value).map_err(|_| "bad base64 value")?;
        if data.len() <= NONCE_LEN {
            return Err("length of decoded data is <= NONCE_LEN");
        }

        let ad = Aad::from(name.as_bytes());
        let key = OpeningKey::new(ALGO, &self.key).expect("opening key");
        let (nonce, sealed) = data.split_at_mut(NONCE_LEN);
        let nonce =
            Nonce::try_assume_unique_for_key(nonce).expect("invalid length of `nonce`");
        let unsealed = open_in_place(&key, nonce, ad, 0, sealed)
            .map_err(|_| "invalid key/nonce/value: bad seal")?;

        if let Ok(unsealed_utf8) = str::from_utf8(unsealed) {
            Ok(unsealed_utf8.to_string())
        } else {
            warn!(
                "Private cookie does not have utf8 content!
It is likely the secret key used to encrypt them has been leaked.
Please change it as soon as possible."
            );
            Err("bad unsealed utf8")
        }
    }

    /// Returns a reference to the `Cookie` inside this jar with the name `name`
    /// and authenticates and decrypts the cookie's value, returning a `Cookie`
    /// with the decrypted value. If the cookie cannot be found, or the cookie
    /// fails to authenticate or decrypt, `None` is returned.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::{CookieJar, Cookie, Key};
    ///
    /// let key = Key::generate();
    /// let mut jar = CookieJar::new();
    /// let mut private_jar = jar.private(&key);
    /// assert!(private_jar.get("name").is_none());
    ///
    /// private_jar.add(Cookie::new("name", "value"));
    /// assert_eq!(private_jar.get("name").unwrap().value(), "value");
    /// ```
    pub fn get(&self, name: &str) -> Option<Cookie<'static>> {
        if let Some(cookie_ref) = self.parent.get(name) {
            let mut cookie = cookie_ref.clone();
            if let Ok(value) = self.unseal(name, cookie.value()) {
                cookie.set_value(value);
                return Some(cookie);
            }
        }

        None
    }

    /// Adds `cookie` to the parent jar. The cookie's value is encrypted with
    /// authenticated encryption assuring confidentiality, integrity, and
    /// authenticity.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::{CookieJar, Cookie, Key};
    ///
    /// let key = Key::generate();
    /// let mut jar = CookieJar::new();
    /// jar.private(&key).add(Cookie::new("name", "value"));
    ///
    /// assert_ne!(jar.get("name").unwrap().value(), "value");
    /// assert_eq!(jar.private(&key).get("name").unwrap().value(), "value");
    /// ```
    pub fn add(&mut self, mut cookie: Cookie<'static>) {
        self.encrypt_cookie(&mut cookie);

        // Add the sealed cookie to the parent.
        self.parent.add(cookie);
    }

    /// Adds an "original" `cookie` to the parent jar. The cookie's value is
    /// encrypted with authenticated encryption assuring confidentiality,
    /// integrity, and authenticity. Adding an original cookie does not affect
    /// the [`CookieJar::delta()`](struct.CookieJar.html#method.delta)
    /// computation. This method is intended to be used to seed the cookie jar
    /// with cookies received from a client's HTTP message.
    ///
    /// For accurate `delta` computations, this method should not be called
    /// after calling `remove`.
    ///
    /// # Example
    ///
    /// ```rust
    /// use actix_http::cookie::{CookieJar, Cookie, Key};
    ///
    /// let key = Key::generate();
    /// let mut jar = CookieJar::new();
    /// jar.private(&key).add_original(Cookie::new("name", "value"));
    ///
    /// assert_eq!(jar.iter().count(), 1);
    /// assert_eq!(jar.delta().count(), 0);
    /// ```
    pub fn add_original(&mut self, mut cookie: Cookie<'static>) {
        self.encrypt_cookie(&mut cookie);

        // Add the sealed cookie to the parent.
        self.parent.add_original(cookie);
    }

    /// Encrypts the cookie's value with
    /// authenticated encryption assuring confidentiality, integrity, and authenticity.
|
||||
fn encrypt_cookie(&self, cookie: &mut Cookie) {
|
||||
let name = cookie.name().as_bytes();
|
||||
let value = cookie.value().as_bytes();
|
||||
let data = encrypt_name_value(name, value, &self.key);
|
||||
|
||||
// Base64 encode the nonce and encrypted value.
|
||||
let sealed_value = base64::encode(&data);
|
||||
cookie.set_value(sealed_value);
|
||||
}
|
||||
|
||||
/// Removes `cookie` from the parent jar.
|
||||
///
|
||||
/// For correct removal, the passed in `cookie` must contain the same `path`
|
||||
/// and `domain` as the cookie that was initially set.
|
||||
///
|
||||
/// See [CookieJar::remove](struct.CookieJar.html#method.remove) for more
|
||||
/// details.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// use actix_http::cookie::{CookieJar, Cookie, Key};
|
||||
///
|
||||
/// let key = Key::generate();
|
||||
/// let mut jar = CookieJar::new();
|
||||
/// let mut private_jar = jar.private(&key);
|
||||
///
|
||||
/// private_jar.add(Cookie::new("name", "value"));
|
||||
/// assert!(private_jar.get("name").is_some());
|
||||
///
|
||||
/// private_jar.remove(Cookie::named("name"));
|
||||
/// assert!(private_jar.get("name").is_none());
|
||||
/// ```
|
||||
pub fn remove(&mut self, cookie: Cookie<'static>) {
|
||||
self.parent.remove(cookie);
|
||||
}
|
||||
}
|
||||
|
||||
fn encrypt_name_value(name: &[u8], value: &[u8], key: &[u8]) -> Vec<u8> {
|
||||
// Create the `SealingKey` structure.
|
||||
let key = SealingKey::new(ALGO, key).expect("sealing key creation");
|
||||
|
||||
// Create a vec to hold the [nonce | cookie value | overhead].
|
||||
let overhead = ALGO.tag_len();
|
||||
let mut data = vec![0; NONCE_LEN + value.len() + overhead];
|
||||
|
||||
// Randomly generate the nonce, then copy the cookie value as input.
|
||||
let (nonce, in_out) = data.split_at_mut(NONCE_LEN);
|
||||
SystemRandom::new()
|
||||
.fill(nonce)
|
||||
.expect("couldn't random fill nonce");
|
||||
in_out[..value.len()].copy_from_slice(value);
|
||||
let nonce =
|
||||
Nonce::try_assume_unique_for_key(nonce).expect("invalid length of `nonce`");
|
||||
|
||||
// Use cookie's name as associated data to prevent value swapping.
|
||||
let ad = Aad::from(name);
|
||||
|
||||
// Perform the actual sealing operation and get the output length.
|
||||
let output_len =
|
||||
seal_in_place(&key, nonce, ad, in_out, overhead).expect("in-place seal");
|
||||
|
||||
// Remove the overhead and return the sealed content.
|
||||
data.truncate(NONCE_LEN + output_len);
|
||||
data
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::{encrypt_name_value, Cookie, CookieJar, Key};
|
||||
|
||||
#[test]
|
||||
fn simple() {
|
||||
let key = Key::generate();
|
||||
let mut jar = CookieJar::new();
|
||||
assert_simple_behaviour!(jar, jar.private(&key));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn private() {
|
||||
let key = Key::generate();
|
||||
let mut jar = CookieJar::new();
|
||||
assert_secure_behaviour!(jar, jar.private(&key));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn non_utf8() {
|
||||
let key = Key::generate();
|
||||
let mut jar = CookieJar::new();
|
||||
|
||||
let name = "malicious";
|
||||
let mut assert_non_utf8 = |value: &[u8]| {
|
||||
let sealed = encrypt_name_value(name.as_bytes(), value, &key.encryption());
|
||||
let encoded = base64::encode(&sealed);
|
||||
assert_eq!(
|
||||
jar.private(&key).unseal(name, &encoded),
|
||||
Err("bad unsealed utf8")
|
||||
);
|
||||
jar.add(Cookie::new(name, encoded));
|
||||
assert_eq!(jar.private(&key).get(name), None);
|
||||
};
|
||||
|
||||
assert_non_utf8(&[0x72, 0xfb, 0xdf, 0x74]); // rûßt in ISO/IEC 8859-1
|
||||
|
||||
let mut malicious =
|
||||
String::from(r#"{"id":"abc123??%X","admin":true}"#).into_bytes();
|
||||
malicious[8] |= 0b1100_0000;
|
||||
malicious[9] |= 0b1100_0000;
|
||||
assert_non_utf8(&malicious);
|
||||
}
|
||||
}
|
||||
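For reference, the `PrivateJar` code above stores each value as `base64(nonce || ciphertext || tag)`: a random 12-byte nonce, the AES-256-GCM ciphertext (same length as the plaintext), and a 16-byte tag, with the cookie name bound in as associated data. A minimal sketch of the length bookkeeping behind `unseal`, assuming the same `base64` crate used above:

```rust
// Sealed private-cookie layout:
//   base64( nonce (12 bytes) || ciphertext (== plaintext length) || GCM tag (16 bytes) )
const NONCE_LEN: usize = 12;
const TAG_LEN: usize = 16; // AES_256_GCM.tag_len()

/// Split a sealed value into (nonce, ciphertext-with-tag) without decrypting it.
fn split_sealed(value: &str) -> Result<(Vec<u8>, Vec<u8>), &'static str> {
    let data = base64::decode(value).map_err(|_| "bad base64 value")?;
    if data.len() <= NONCE_LEN + TAG_LEN {
        return Err("sealed value too short");
    }
    let (nonce, sealed) = data.split_at(NONCE_LEN);
    // `sealed` still carries the trailing tag; `open_in_place` strips it and
    // yields `sealed.len() - TAG_LEN` plaintext bytes on success.
    Ok((nonce.to_vec(), sealed.to_vec()))
}
```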
@@ -1,185 +0,0 @@
|
||||
use ring::digest::{Algorithm, SHA256};
|
||||
use ring::hmac::{sign, verify_with_own_key as verify, SigningKey};
|
||||
|
||||
use super::Key;
|
||||
use crate::cookie::{Cookie, CookieJar};
|
||||
|
||||
// Keep these in sync, and keep the key len synced with the `signed` docs as
|
||||
// well as the `KEYS_INFO` const in secure::Key.
|
||||
static HMAC_DIGEST: &'static Algorithm = &SHA256;
|
||||
const BASE64_DIGEST_LEN: usize = 44;
|
||||
pub const KEY_LEN: usize = 32;
|
||||
|
||||
/// A child cookie jar that authenticates its cookies.
|
||||
///
|
||||
/// A _signed_ child jar signs all the cookies added to it and verifies cookies
|
||||
/// retrieved from it. Any cookies stored in a `SignedJar` are assured integrity
|
||||
/// and authenticity. In other words, clients cannot tamper with the contents of
|
||||
/// a cookie nor can they fabricate cookie values, but the data is visible in
|
||||
/// plaintext.
|
||||
///
|
||||
/// This type is only available when the `secure` feature is enabled.
|
||||
pub struct SignedJar<'a> {
|
||||
parent: &'a mut CookieJar,
|
||||
key: SigningKey,
|
||||
}
|
||||
|
||||
impl<'a> SignedJar<'a> {
|
||||
/// Creates a new child `SignedJar` with parent `parent` and key `key`. This
|
||||
/// method is typically called indirectly via the `signed` method of
|
||||
/// `CookieJar`.
|
||||
#[doc(hidden)]
|
||||
pub fn new(parent: &'a mut CookieJar, key: &Key) -> SignedJar<'a> {
|
||||
SignedJar {
|
||||
parent,
|
||||
key: SigningKey::new(HMAC_DIGEST, key.signing()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Given a signed value `str` where the signature is prepended to `value`,
|
||||
/// verifies the signed value and returns it. If there's a problem, returns
|
||||
/// an `Err` with a string describing the issue.
|
||||
fn verify(&self, cookie_value: &str) -> Result<String, &'static str> {
|
||||
if cookie_value.len() < BASE64_DIGEST_LEN {
|
||||
return Err("length of value is <= BASE64_DIGEST_LEN");
|
||||
}
|
||||
|
||||
let (digest_str, value) = cookie_value.split_at(BASE64_DIGEST_LEN);
|
||||
let sig = base64::decode(digest_str).map_err(|_| "bad base64 digest")?;
|
||||
|
||||
verify(&self.key, value.as_bytes(), &sig)
|
||||
.map(|_| value.to_string())
|
||||
.map_err(|_| "value did not verify")
|
||||
}
|
||||
|
||||
/// Returns a reference to the `Cookie` inside this jar with the name `name`
|
||||
/// and verifies the authenticity and integrity of the cookie's value,
|
||||
/// returning a `Cookie` with the authenticated value. If the cookie cannot
|
||||
/// be found, or the cookie fails to verify, `None` is returned.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// use actix_http::cookie::{CookieJar, Cookie, Key};
|
||||
///
|
||||
/// let key = Key::generate();
|
||||
/// let mut jar = CookieJar::new();
|
||||
/// let mut signed_jar = jar.signed(&key);
|
||||
/// assert!(signed_jar.get("name").is_none());
|
||||
///
|
||||
/// signed_jar.add(Cookie::new("name", "value"));
|
||||
/// assert_eq!(signed_jar.get("name").unwrap().value(), "value");
|
||||
/// ```
|
||||
pub fn get(&self, name: &str) -> Option<Cookie<'static>> {
|
||||
if let Some(cookie_ref) = self.parent.get(name) {
|
||||
let mut cookie = cookie_ref.clone();
|
||||
if let Ok(value) = self.verify(cookie.value()) {
|
||||
cookie.set_value(value);
|
||||
return Some(cookie);
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// Adds `cookie` to the parent jar. The cookie's value is signed assuring
|
||||
/// integrity and authenticity.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// use actix_http::cookie::{CookieJar, Cookie, Key};
|
||||
///
|
||||
/// let key = Key::generate();
|
||||
/// let mut jar = CookieJar::new();
|
||||
/// jar.signed(&key).add(Cookie::new("name", "value"));
|
||||
///
|
||||
/// assert_ne!(jar.get("name").unwrap().value(), "value");
|
||||
/// assert!(jar.get("name").unwrap().value().contains("value"));
|
||||
/// assert_eq!(jar.signed(&key).get("name").unwrap().value(), "value");
|
||||
/// ```
|
||||
pub fn add(&mut self, mut cookie: Cookie<'static>) {
|
||||
self.sign_cookie(&mut cookie);
|
||||
self.parent.add(cookie);
|
||||
}
|
||||
|
||||
/// Adds an "original" `cookie` to this jar. The cookie's value is signed
|
||||
/// assuring integrity and authenticity. Adding an original cookie does not
|
||||
/// affect the [`CookieJar::delta()`](struct.CookieJar.html#method.delta)
|
||||
/// computation. This method is intended to be used to seed the cookie jar
|
||||
/// with cookies received from a client's HTTP message.
|
||||
///
|
||||
/// For accurate `delta` computations, this method should not be called
|
||||
/// after calling `remove`.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// use actix_http::cookie::{CookieJar, Cookie, Key};
|
||||
///
|
||||
/// let key = Key::generate();
|
||||
/// let mut jar = CookieJar::new();
|
||||
/// jar.signed(&key).add_original(Cookie::new("name", "value"));
|
||||
///
|
||||
/// assert_eq!(jar.iter().count(), 1);
|
||||
/// assert_eq!(jar.delta().count(), 0);
|
||||
/// ```
|
||||
pub fn add_original(&mut self, mut cookie: Cookie<'static>) {
|
||||
self.sign_cookie(&mut cookie);
|
||||
self.parent.add_original(cookie);
|
||||
}
|
||||
|
||||
/// Signs the cookie's value assuring integrity and authenticity.
|
||||
fn sign_cookie(&self, cookie: &mut Cookie) {
|
||||
let digest = sign(&self.key, cookie.value().as_bytes());
|
||||
let mut new_value = base64::encode(digest.as_ref());
|
||||
new_value.push_str(cookie.value());
|
||||
cookie.set_value(new_value);
|
||||
}
|
||||
|
||||
/// Removes `cookie` from the parent jar.
|
||||
///
|
||||
/// For correct removal, the passed in `cookie` must contain the same `path`
|
||||
/// and `domain` as the cookie that was initially set.
|
||||
///
|
||||
/// See [CookieJar::remove](struct.CookieJar.html#method.remove) for more
|
||||
/// details.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// use actix_http::cookie::{CookieJar, Cookie, Key};
|
||||
///
|
||||
/// let key = Key::generate();
|
||||
/// let mut jar = CookieJar::new();
|
||||
/// let mut signed_jar = jar.signed(&key);
|
||||
///
|
||||
/// signed_jar.add(Cookie::new("name", "value"));
|
||||
/// assert!(signed_jar.get("name").is_some());
|
||||
///
|
||||
/// signed_jar.remove(Cookie::named("name"));
|
||||
/// assert!(signed_jar.get("name").is_none());
|
||||
/// ```
|
||||
pub fn remove(&mut self, cookie: Cookie<'static>) {
|
||||
self.parent.remove(cookie);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::{Cookie, CookieJar, Key};
|
||||
|
||||
#[test]
|
||||
fn simple() {
|
||||
let key = Key::generate();
|
||||
let mut jar = CookieJar::new();
|
||||
assert_simple_behaviour!(jar, jar.signed(&key));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn private() {
|
||||
let key = Key::generate();
|
||||
let mut jar = CookieJar::new();
|
||||
assert_secure_behaviour!(jar, jar.signed(&key));
|
||||
}
|
||||
}
|
||||
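The `SignedJar` above prepends `base64(HMAC-SHA256(value))` to the plaintext value; 32 digest bytes encode to exactly 44 base64 characters, which is where `BASE64_DIGEST_LEN` comes from. A small, std-only sketch of how `verify` splits a stored value (the HMAC check itself is omitted):

```rust
// A signed cookie value is `base64(hmac_sha256(value)) || value`.
const BASE64_DIGEST_LEN: usize = 44; // 32 HMAC bytes -> 44 base64 chars

fn split_signed(cookie_value: &str) -> Result<(&str, &str), &'static str> {
    if cookie_value.len() < BASE64_DIGEST_LEN {
        return Err("value too short to contain a digest");
    }
    // The digest portion is plain ASCII base64, so a byte-index split is safe.
    Ok(cookie_value.split_at(BASE64_DIGEST_LEN))
}

fn main() {
    let stored = format!("{}{}", "A".repeat(BASE64_DIGEST_LEN), "value");
    let (digest, value) = split_signed(&stored).unwrap();
    assert_eq!(digest.len(), BASE64_DIGEST_LEN);
    assert_eq!(value, "value");
}
```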
@@ -1,12 +1,13 @@
|
||||
use std::future::Future;
|
||||
use std::io::{self, Write};
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use actix_threadpool::{run, CpuFuture};
|
||||
#[cfg(feature = "brotli")]
|
||||
use brotli2::write::BrotliDecoder;
|
||||
use brotli::DecompressorWriter as BrotliDecoder;
|
||||
use bytes::Bytes;
|
||||
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
|
||||
use flate2::write::{GzDecoder, ZlibDecoder};
|
||||
use futures::{try_ready, Async, Future, Poll, Stream};
|
||||
use futures_core::{ready, Stream};
|
||||
|
||||
use super::Writer;
|
||||
use crate::error::PayloadError;
|
||||
@@ -23,21 +24,18 @@ pub struct Decoder<S> {
|
||||
|
||||
impl<S> Decoder<S>
|
||||
where
|
||||
S: Stream<Item = Bytes, Error = PayloadError>,
|
||||
S: Stream<Item = Result<Bytes, PayloadError>>,
|
||||
{
|
||||
/// Construct a decoder.
|
||||
#[inline]
|
||||
pub fn new(stream: S, encoding: ContentEncoding) -> Decoder<S> {
|
||||
let decoder = match encoding {
|
||||
#[cfg(feature = "brotli")]
|
||||
ContentEncoding::Br => Some(ContentDecoder::Br(Box::new(
|
||||
BrotliDecoder::new(Writer::new()),
|
||||
BrotliDecoder::new(Writer::new(), 8 * 1024),
|
||||
))),
|
||||
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
|
||||
ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new(
|
||||
ZlibDecoder::new(Writer::new()),
|
||||
))),
|
||||
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
|
||||
ContentEncoding::Gzip => Some(ContentDecoder::Gzip(Box::new(
|
||||
GzDecoder::new(Writer::new()),
|
||||
))),
|
||||
@@ -71,34 +69,40 @@ where
|
||||
|
||||
impl<S> Stream for Decoder<S>
|
||||
where
|
||||
S: Stream<Item = Bytes, Error = PayloadError>,
|
||||
S: Stream<Item = Result<Bytes, PayloadError>> + Unpin,
|
||||
{
|
||||
type Item = Bytes;
|
||||
type Error = PayloadError;
|
||||
type Item = Result<Bytes, PayloadError>;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
fn poll_next(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Self::Item>> {
|
||||
loop {
|
||||
if let Some(ref mut fut) = self.fut {
|
||||
let (chunk, decoder) = try_ready!(fut.poll());
|
||||
let (chunk, decoder) = match ready!(Pin::new(fut).poll(cx)) {
|
||||
Ok(item) => item,
|
||||
Err(e) => return Poll::Ready(Some(Err(e.into()))),
|
||||
};
|
||||
self.decoder = Some(decoder);
|
||||
self.fut.take();
|
||||
if let Some(chunk) = chunk {
|
||||
return Ok(Async::Ready(Some(chunk)));
|
||||
return Poll::Ready(Some(Ok(chunk)));
|
||||
}
|
||||
}
|
||||
|
||||
if self.eof {
|
||||
return Ok(Async::Ready(None));
|
||||
return Poll::Ready(None);
|
||||
}
|
||||
|
||||
match self.stream.poll()? {
|
||||
Async::Ready(Some(chunk)) => {
|
||||
match Pin::new(&mut self.stream).poll_next(cx) {
|
||||
Poll::Ready(Some(Err(err))) => return Poll::Ready(Some(Err(err))),
|
||||
Poll::Ready(Some(Ok(chunk))) => {
|
||||
if let Some(mut decoder) = self.decoder.take() {
|
||||
if chunk.len() < INPLACE {
|
||||
let chunk = decoder.feed_data(chunk)?;
|
||||
self.decoder = Some(decoder);
|
||||
if let Some(chunk) = chunk {
|
||||
return Ok(Async::Ready(Some(chunk)));
|
||||
return Poll::Ready(Some(Ok(chunk)));
|
||||
}
|
||||
} else {
|
||||
self.fut = Some(run(move || {
|
||||
@@ -108,41 +112,40 @@ where
|
||||
}
|
||||
continue;
|
||||
} else {
|
||||
return Ok(Async::Ready(Some(chunk)));
|
||||
return Poll::Ready(Some(Ok(chunk)));
|
||||
}
|
||||
}
|
||||
Async::Ready(None) => {
|
||||
Poll::Ready(None) => {
|
||||
self.eof = true;
|
||||
return if let Some(mut decoder) = self.decoder.take() {
|
||||
Ok(Async::Ready(decoder.feed_eof()?))
|
||||
match decoder.feed_eof() {
|
||||
Ok(Some(res)) => Poll::Ready(Some(Ok(res))),
|
||||
Ok(None) => Poll::Ready(None),
|
||||
Err(err) => Poll::Ready(Some(Err(err.into()))),
|
||||
}
|
||||
} else {
|
||||
Ok(Async::Ready(None))
|
||||
Poll::Ready(None)
|
||||
};
|
||||
}
|
||||
Async::NotReady => break,
|
||||
Poll::Pending => break,
|
||||
}
|
||||
}
|
||||
Ok(Async::NotReady)
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
|
||||
enum ContentDecoder {
|
||||
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
|
||||
Deflate(Box<ZlibDecoder<Writer>>),
|
||||
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
|
||||
Gzip(Box<GzDecoder<Writer>>),
|
||||
#[cfg(feature = "brotli")]
|
||||
Br(Box<BrotliDecoder<Writer>>),
|
||||
}
|
||||
|
||||
impl ContentDecoder {
|
||||
#[allow(unreachable_patterns)]
|
||||
fn feed_eof(&mut self) -> io::Result<Option<Bytes>> {
|
||||
match self {
|
||||
#[cfg(feature = "brotli")]
|
||||
ContentDecoder::Br(ref mut decoder) => match decoder.finish() {
|
||||
Ok(mut writer) => {
|
||||
let b = writer.take();
|
||||
ContentDecoder::Br(ref mut decoder) => match decoder.flush() {
|
||||
Ok(()) => {
|
||||
let b = decoder.get_mut().take();
|
||||
if !b.is_empty() {
|
||||
Ok(Some(b))
|
||||
} else {
|
||||
@@ -151,7 +154,6 @@ impl ContentDecoder {
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
},
|
||||
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
|
||||
ContentDecoder::Gzip(ref mut decoder) => match decoder.try_finish() {
|
||||
Ok(_) => {
|
||||
let b = decoder.get_mut().take();
|
||||
@@ -163,7 +165,6 @@ impl ContentDecoder {
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
},
|
||||
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
|
||||
ContentDecoder::Deflate(ref mut decoder) => match decoder.try_finish() {
|
||||
Ok(_) => {
|
||||
let b = decoder.get_mut().take();
|
||||
@@ -175,14 +176,11 @@ impl ContentDecoder {
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
},
|
||||
_ => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(unreachable_patterns)]
|
||||
fn feed_data(&mut self, data: Bytes) -> io::Result<Option<Bytes>> {
|
||||
match self {
|
||||
#[cfg(feature = "brotli")]
|
||||
ContentDecoder::Br(ref mut decoder) => match decoder.write_all(&data) {
|
||||
Ok(_) => {
|
||||
decoder.flush()?;
|
||||
@@ -195,7 +193,6 @@ impl ContentDecoder {
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
},
|
||||
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
|
||||
ContentDecoder::Gzip(ref mut decoder) => match decoder.write_all(&data) {
|
||||
Ok(_) => {
|
||||
decoder.flush()?;
|
||||
@@ -208,7 +205,6 @@ impl ContentDecoder {
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
},
|
||||
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
|
||||
ContentDecoder::Deflate(ref mut decoder) => match decoder.write_all(&data) {
|
||||
Ok(_) => {
|
||||
decoder.flush()?;
|
||||
@@ -221,7 +217,6 @@ impl ContentDecoder {
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
},
|
||||
_ => Ok(Some(data)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
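The decoder changes above migrate from the futures 0.1 `Stream` (`poll`, `Async`, a separate `Error` type) to `futures_core::Stream` (`poll_next`, `Poll`, `Pin`, items carried as `Result`). A minimal stream in the new shape, assuming only the `futures_core` and `bytes` crates already imported above (the `io::Error` error type is chosen here just for illustration):

```rust
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};

use bytes::Bytes;
use futures_core::{ready, Stream};

/// A trivial pass-through stream in the post-migration shape:
/// items are `Result<Bytes, _>` and readiness is reported through `Poll`.
struct PassThrough<S> {
    inner: S,
}

impl<S> Stream for PassThrough<S>
where
    S: Stream<Item = Result<Bytes, io::Error>> + Unpin,
{
    type Item = Result<Bytes, io::Error>;

    fn poll_next(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Self::Item>> {
        // `Pin::new` is allowed because the inner stream is `Unpin`,
        // mirroring the `S: ... + Unpin` bound added to `Decoder` above.
        match ready!(Pin::new(&mut self.inner).poll_next(cx)) {
            Some(Ok(chunk)) => Poll::Ready(Some(Ok(chunk))),
            Some(Err(err)) => Poll::Ready(Some(Err(err))),
            None => Poll::Ready(None),
        }
    }
}
```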
@@ -1,25 +1,29 @@
|
||||
//! Stream encoder
|
||||
use std::future::Future;
|
||||
use std::io::{self, Write};
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use actix_threadpool::{run, CpuFuture};
|
||||
#[cfg(feature = "brotli")]
|
||||
use brotli2::write::BrotliEncoder;
|
||||
use brotli::CompressorWriter as BrotliEncoder;
|
||||
use bytes::Bytes;
|
||||
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
|
||||
use flate2::write::{GzEncoder, ZlibEncoder};
|
||||
use futures::{Async, Future, Poll};
|
||||
use futures_core::ready;
|
||||
use pin_project::pin_project;
|
||||
|
||||
use crate::body::{Body, BodySize, MessageBody, ResponseBody};
|
||||
use crate::http::header::{ContentEncoding, CONTENT_ENCODING};
|
||||
use crate::http::{HeaderValue, HttpTryFrom, StatusCode};
|
||||
use crate::http::{HeaderValue, StatusCode};
|
||||
use crate::{Error, ResponseHead};
|
||||
|
||||
use super::Writer;
|
||||
|
||||
const INPLACE: usize = 2049;
|
||||
const INPLACE: usize = 1024;
|
||||
|
||||
#[pin_project]
|
||||
pub struct Encoder<B> {
|
||||
eof: bool,
|
||||
#[pin]
|
||||
body: EncoderBody<B>,
|
||||
encoder: Option<ContentEncoder>,
|
||||
fut: Option<CpuFuture<ContentEncoder, io::Error>>,
|
||||
@@ -33,6 +37,7 @@ impl<B: MessageBody> Encoder<B> {
|
||||
) -> ResponseBody<Encoder<B>> {
|
||||
let can_encode = !(head.headers().contains_key(&CONTENT_ENCODING)
|
||||
|| head.status == StatusCode::SWITCHING_PROTOCOLS
|
||||
|| head.status == StatusCode::NO_CONTENT
|
||||
|| encoding == ContentEncoding::Identity
|
||||
|| encoding == ContentEncoding::Auto);
|
||||
|
||||
@@ -53,105 +58,131 @@ impl<B: MessageBody> Encoder<B> {
|
||||
};
|
||||
|
||||
if can_encode {
|
||||
update_head(encoding, head);
|
||||
head.no_chunking(false);
|
||||
ResponseBody::Body(Encoder {
|
||||
body,
|
||||
eof: false,
|
||||
fut: None,
|
||||
encoder: ContentEncoder::encoder(encoding),
|
||||
})
|
||||
} else {
|
||||
ResponseBody::Body(Encoder {
|
||||
body,
|
||||
eof: false,
|
||||
fut: None,
|
||||
encoder: None,
|
||||
})
|
||||
// Modify response body only if encoder is not None
|
||||
if let Some(enc) = ContentEncoder::encoder(encoding) {
|
||||
update_head(encoding, head);
|
||||
head.no_chunking(false);
|
||||
return ResponseBody::Body(Encoder {
|
||||
body,
|
||||
eof: false,
|
||||
fut: None,
|
||||
encoder: Some(enc),
|
||||
});
|
||||
}
|
||||
}
|
||||
ResponseBody::Body(Encoder {
|
||||
body,
|
||||
eof: false,
|
||||
fut: None,
|
||||
encoder: None,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[pin_project(project = EncoderBodyProj)]
|
||||
enum EncoderBody<B> {
|
||||
Bytes(Bytes),
|
||||
Stream(#[pin] B),
|
||||
BoxedStream(Box<dyn MessageBody + Unpin>),
|
||||
}
|
||||
|
||||
impl<B: MessageBody> MessageBody for EncoderBody<B> {
|
||||
fn size(&self) -> BodySize {
|
||||
match self {
|
||||
EncoderBody::Bytes(ref b) => b.size(),
|
||||
EncoderBody::Stream(ref b) => b.size(),
|
||||
EncoderBody::BoxedStream(ref b) => b.size(),
|
||||
}
|
||||
}
|
||||
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
match self.project() {
|
||||
EncoderBodyProj::Bytes(b) => {
|
||||
if b.is_empty() {
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
Poll::Ready(Some(Ok(std::mem::take(b))))
|
||||
}
|
||||
}
|
||||
EncoderBodyProj::Stream(b) => b.poll_next(cx),
|
||||
EncoderBodyProj::BoxedStream(ref mut b) => {
|
||||
Pin::new(b.as_mut()).poll_next(cx)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
enum EncoderBody<B> {
|
||||
Bytes(Bytes),
|
||||
Stream(B),
|
||||
BoxedStream(Box<dyn MessageBody>),
|
||||
}
|
||||
|
||||
impl<B: MessageBody> MessageBody for Encoder<B> {
|
||||
fn length(&self) -> BodySize {
|
||||
fn size(&self) -> BodySize {
|
||||
if self.encoder.is_none() {
|
||||
match self.body {
|
||||
EncoderBody::Bytes(ref b) => b.length(),
|
||||
EncoderBody::Stream(ref b) => b.length(),
|
||||
EncoderBody::BoxedStream(ref b) => b.length(),
|
||||
}
|
||||
self.body.size()
|
||||
} else {
|
||||
BodySize::Stream
|
||||
}
|
||||
}
|
||||
|
||||
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
let mut this = self.project();
|
||||
loop {
|
||||
if self.eof {
|
||||
return Ok(Async::Ready(None));
|
||||
if *this.eof {
|
||||
return Poll::Ready(None);
|
||||
}
|
||||
|
||||
if let Some(ref mut fut) = self.fut {
|
||||
let mut encoder = futures::try_ready!(fut.poll());
|
||||
if let Some(ref mut fut) = this.fut {
|
||||
let mut encoder = match ready!(Pin::new(fut).poll(cx)) {
|
||||
Ok(item) => item,
|
||||
Err(e) => return Poll::Ready(Some(Err(e.into()))),
|
||||
};
|
||||
let chunk = encoder.take();
|
||||
self.encoder = Some(encoder);
|
||||
self.fut.take();
|
||||
*this.encoder = Some(encoder);
|
||||
this.fut.take();
|
||||
if !chunk.is_empty() {
|
||||
return Ok(Async::Ready(Some(chunk)));
|
||||
return Poll::Ready(Some(Ok(chunk)));
|
||||
}
|
||||
}
|
||||
|
||||
let result = match self.body {
|
||||
EncoderBody::Bytes(ref mut b) => {
|
||||
if b.is_empty() {
|
||||
Async::Ready(None)
|
||||
} else {
|
||||
Async::Ready(Some(std::mem::replace(b, Bytes::new())))
|
||||
}
|
||||
}
|
||||
EncoderBody::Stream(ref mut b) => b.poll_next()?,
|
||||
EncoderBody::BoxedStream(ref mut b) => b.poll_next()?,
|
||||
};
|
||||
let result = this.body.as_mut().poll_next(cx);
|
||||
|
||||
match result {
|
||||
Async::NotReady => return Ok(Async::NotReady),
|
||||
Async::Ready(Some(chunk)) => {
|
||||
if let Some(mut encoder) = self.encoder.take() {
|
||||
Poll::Ready(Some(Ok(chunk))) => {
|
||||
if let Some(mut encoder) = this.encoder.take() {
|
||||
if chunk.len() < INPLACE {
|
||||
encoder.write(&chunk)?;
|
||||
let chunk = encoder.take();
|
||||
self.encoder = Some(encoder);
|
||||
*this.encoder = Some(encoder);
|
||||
if !chunk.is_empty() {
|
||||
return Ok(Async::Ready(Some(chunk)));
|
||||
return Poll::Ready(Some(Ok(chunk)));
|
||||
}
|
||||
} else {
|
||||
self.fut = Some(run(move || {
|
||||
*this.fut = Some(run(move || {
|
||||
encoder.write(&chunk)?;
|
||||
Ok(encoder)
|
||||
}));
|
||||
}
|
||||
} else {
|
||||
return Ok(Async::Ready(Some(chunk)));
|
||||
return Poll::Ready(Some(Ok(chunk)));
|
||||
}
|
||||
}
|
||||
Async::Ready(None) => {
|
||||
if let Some(encoder) = self.encoder.take() {
|
||||
Poll::Ready(None) => {
|
||||
if let Some(encoder) = this.encoder.take() {
|
||||
let chunk = encoder.finish()?;
|
||||
if chunk.is_empty() {
|
||||
return Ok(Async::Ready(None));
|
||||
return Poll::Ready(None);
|
||||
} else {
|
||||
self.eof = true;
|
||||
return Ok(Async::Ready(Some(chunk)));
|
||||
*this.eof = true;
|
||||
return Poll::Ready(Some(Ok(chunk)));
|
||||
}
|
||||
} else {
|
||||
return Ok(Async::Ready(None));
|
||||
return Poll::Ready(None);
|
||||
}
|
||||
}
|
||||
val => return val,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -160,36 +191,33 @@ impl<B: MessageBody> MessageBody for Encoder<B> {
|
||||
fn update_head(encoding: ContentEncoding, head: &mut ResponseHead) {
|
||||
head.headers_mut().insert(
|
||||
CONTENT_ENCODING,
|
||||
HeaderValue::try_from(Bytes::from_static(encoding.as_str().as_bytes())).unwrap(),
|
||||
HeaderValue::from_static(encoding.as_str()),
|
||||
);
|
||||
}
|
||||
|
||||
enum ContentEncoder {
|
||||
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
|
||||
Deflate(ZlibEncoder<Writer>),
|
||||
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
|
||||
Gzip(GzEncoder<Writer>),
|
||||
#[cfg(feature = "brotli")]
|
||||
Br(BrotliEncoder<Writer>),
|
||||
}
|
||||
|
||||
impl ContentEncoder {
|
||||
fn encoder(encoding: ContentEncoding) -> Option<Self> {
|
||||
match encoding {
|
||||
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
|
||||
ContentEncoding::Deflate => Some(ContentEncoder::Deflate(ZlibEncoder::new(
|
||||
Writer::new(),
|
||||
flate2::Compression::fast(),
|
||||
))),
|
||||
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
|
||||
ContentEncoding::Gzip => Some(ContentEncoder::Gzip(GzEncoder::new(
|
||||
Writer::new(),
|
||||
flate2::Compression::fast(),
|
||||
))),
|
||||
#[cfg(feature = "brotli")]
|
||||
ContentEncoding::Br => {
|
||||
Some(ContentEncoder::Br(BrotliEncoder::new(Writer::new(), 3)))
|
||||
}
|
||||
ContentEncoding::Br => Some(ContentEncoder::Br(BrotliEncoder::new(
|
||||
Writer::new(),
|
||||
32 * 1024,
|
||||
3,
|
||||
22,
|
||||
))),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
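The brotli encoder now comes from the `brotli` crate's `CompressorWriter` (inner writer, buffer size, quality, `lg_window_size`) rather than `brotli2`. A round-trip sketch using the parameter shapes from this diff; the constructor signatures are assumptions about the `brotli` crate, not verified against its docs:

```rust
use std::io::Write;

fn main() -> std::io::Result<()> {
    let input = b"hello hello hello hello hello";

    // Compress: (inner writer, buffer size, quality, lg_window_size).
    let mut compressed = Vec::new();
    {
        let mut enc = brotli::CompressorWriter::new(&mut compressed, 32 * 1024, 3, 22);
        enc.write_all(input)?;
        enc.flush()?;
    } // any remaining output is emitted when the writer is dropped

    // Decompress through the writer-side decompressor used by `ContentDecoder::Br`.
    let mut decompressed = Vec::new();
    {
        let mut dec = brotli::DecompressorWriter::new(&mut decompressed, 8 * 1024);
        dec.write_all(&compressed)?;
        dec.flush()?;
    }

    assert_eq!(&decompressed[..], &input[..]);
    Ok(())
}
```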
@@ -197,28 +225,22 @@ impl ContentEncoder {
|
||||
#[inline]
|
||||
pub(crate) fn take(&mut self) -> Bytes {
|
||||
match *self {
|
||||
#[cfg(feature = "brotli")]
|
||||
ContentEncoder::Br(ref mut encoder) => encoder.get_mut().take(),
|
||||
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
|
||||
ContentEncoder::Deflate(ref mut encoder) => encoder.get_mut().take(),
|
||||
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
|
||||
ContentEncoder::Gzip(ref mut encoder) => encoder.get_mut().take(),
|
||||
}
|
||||
}
|
||||
|
||||
fn finish(self) -> Result<Bytes, io::Error> {
|
||||
match self {
|
||||
#[cfg(feature = "brotli")]
|
||||
ContentEncoder::Br(encoder) => match encoder.finish() {
|
||||
Ok(writer) => Ok(writer.buf.freeze()),
|
||||
ContentEncoder::Br(mut encoder) => match encoder.flush() {
|
||||
Ok(()) => Ok(encoder.into_inner().buf.freeze()),
|
||||
Err(err) => Err(err),
|
||||
},
|
||||
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
|
||||
ContentEncoder::Gzip(encoder) => match encoder.finish() {
|
||||
Ok(writer) => Ok(writer.buf.freeze()),
|
||||
Err(err) => Err(err),
|
||||
},
|
||||
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
|
||||
ContentEncoder::Deflate(encoder) => match encoder.finish() {
|
||||
Ok(writer) => Ok(writer.buf.freeze()),
|
||||
Err(err) => Err(err),
|
||||
@@ -228,7 +250,6 @@ impl ContentEncoder {
|
||||
|
||||
fn write(&mut self, data: &[u8]) -> Result<(), io::Error> {
|
||||
match *self {
|
||||
#[cfg(feature = "brotli")]
|
||||
ContentEncoder::Br(ref mut encoder) => match encoder.write_all(data) {
|
||||
Ok(_) => Ok(()),
|
||||
Err(err) => {
|
||||
@@ -236,7 +257,6 @@ impl ContentEncoder {
|
||||
Err(err)
|
||||
}
|
||||
},
|
||||
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
|
||||
ContentEncoder::Gzip(ref mut encoder) => match encoder.write_all(data) {
|
||||
Ok(_) => Ok(()),
|
||||
Err(err) => {
|
||||
@@ -244,7 +264,6 @@ impl ContentEncoder {
|
||||
Err(err)
|
||||
}
|
||||
},
|
||||
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
|
||||
ContentEncoder::Deflate(ref mut encoder) => match encoder.write_all(data) {
|
||||
Ok(_) => Ok(()),
|
||||
Err(err) => {
|
||||
|
||||
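`Encoder` now pins its `body` field with `pin-project` and works through the projected view (`*this.eof`, `this.body.as_mut()`, `*this.encoder`). The pattern in isolation, as a sketch assuming the `pin-project` and `futures-core` crates already used above:

```rust
use std::pin::Pin;
use std::task::{Context, Poll};

use futures_core::Stream;
use pin_project::pin_project;

#[pin_project]
struct Counted<S> {
    #[pin]
    inner: S,     // structurally pinned, like `body` in `Encoder`
    count: usize, // plain field, accessed through the projection as `*this.count`
}

impl<S: Stream> Stream for Counted<S> {
    type Item = S::Item;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // Project once, then use the pinned field without any `unsafe`.
        let this = self.project();
        match this.inner.poll_next(cx) {
            Poll::Ready(Some(item)) => {
                *this.count += 1;
                Poll::Ready(Some(item))
            }
            other => other,
        }
    }
}
```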
@@ -19,8 +19,9 @@ impl Writer {
|
||||
buf: BytesMut::with_capacity(8192),
|
||||
}
|
||||
}
|
||||
|
||||
fn take(&mut self) -> Bytes {
|
||||
self.buf.take().freeze()
|
||||
self.buf.split().freeze()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -29,6 +30,7 @@ impl io::Write for Writer {
|
||||
self.buf.extend_from_slice(buf);
|
||||
Ok(buf.len())
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
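The `take` change above replaces the deprecated `BytesMut::take` with `split().freeze()`: the accumulated bytes are moved out as an immutable `Bytes` chunk while the buffer stays reusable. The helper in full, as a self-contained sketch with the `bytes` crate:

```rust
use std::io::{self, Write};

use bytes::{Bytes, BytesMut};

/// Accumulates written bytes and hands them off as frozen chunks.
struct Writer {
    buf: BytesMut,
}

impl Writer {
    fn new() -> Writer {
        Writer { buf: BytesMut::with_capacity(8192) }
    }

    /// Move the accumulated bytes out; the buffer is left empty but reusable.
    fn take(&mut self) -> Bytes {
        self.buf.split().freeze()
    }
}

impl Write for Writer {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.buf.extend_from_slice(buf);
        Ok(buf.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}

fn main() {
    let mut w = Writer::new();
    w.write_all(b"chunk").unwrap();
    assert_eq!(w.take(), Bytes::from_static(b"chunk"));
    assert!(w.take().is_empty()); // drained by the first take
}
```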
@@ -1,24 +1,29 @@
|
||||
//! Error and Result module
|
||||
|
||||
use std::cell::RefCell;
|
||||
use std::io::Write;
|
||||
use std::str::Utf8Error;
|
||||
use std::string::FromUtf8Error;
|
||||
use std::{fmt, io, result};
|
||||
|
||||
use actix_codec::{Decoder, Encoder};
|
||||
pub use actix_threadpool::BlockingError;
|
||||
use actix_utils::dispatcher::DispatcherError as FramedDispatcherError;
|
||||
use actix_utils::timeout::TimeoutError;
|
||||
use bytes::BytesMut;
|
||||
use derive_more::{Display, From};
|
||||
use futures::Canceled;
|
||||
pub use futures_channel::oneshot::Canceled;
|
||||
use http::uri::InvalidUri;
|
||||
use http::{header, Error as HttpError, StatusCode};
|
||||
use httparse;
|
||||
use serde::de::value::Error as DeError;
|
||||
use serde_json::error::Error as JsonError;
|
||||
use serde_urlencoded::ser::Error as FormError;
|
||||
use tokio_timer::Error as TimerError;
|
||||
|
||||
// re-export for convinience
|
||||
// re-export for convenience
|
||||
use crate::body::Body;
|
||||
pub use crate::cookie::ParseError as CookieParseError;
|
||||
use crate::response::Response;
|
||||
use crate::helpers::Writer;
|
||||
use crate::response::{Response, ResponseBuilder};
|
||||
|
||||
/// A specialized [`Result`](https://doc.rust-lang.org/std/result/enum.Result.html)
|
||||
/// for actix web operations
|
||||
@@ -30,7 +35,7 @@ pub type Result<T, E = Error> = result::Result<T, E>;
|
||||
|
||||
/// General purpose actix web error.
|
||||
///
|
||||
/// An actix web error is used to carry errors from `failure` or `std::error`
|
||||
/// An actix web error is used to carry errors from `std::error`
|
||||
/// through actix in a convenient way. It can be created through
|
||||
/// converting errors with `into()`.
|
||||
///
|
||||
@@ -39,35 +44,68 @@ pub type Result<T, E = Error> = result::Result<T, E>;
|
||||
/// if you have access to an actix `Error` you can always get a
|
||||
/// `ResponseError` reference from it.
|
||||
pub struct Error {
|
||||
cause: Box<ResponseError>,
|
||||
cause: Box<dyn ResponseError>,
|
||||
}
|
||||
|
||||
impl Error {
|
||||
/// Returns the reference to the underlying `ResponseError`.
|
||||
pub fn as_response_error(&self) -> &ResponseError {
|
||||
pub fn as_response_error(&self) -> &dyn ResponseError {
|
||||
self.cause.as_ref()
|
||||
}
|
||||
|
||||
/// Similar to `as_response_error` but downcasts.
|
||||
pub fn as_error<T: ResponseError + 'static>(&self) -> Option<&T> {
|
||||
<dyn ResponseError>::downcast_ref(self.cause.as_ref())
|
||||
}
|
||||
}
|
||||
|
||||
/// Error that can be converted to `Response`
|
||||
pub trait ResponseError: fmt::Debug + fmt::Display {
|
||||
/// Response's status code
|
||||
///
|
||||
/// Internal server error is generated by default.
|
||||
fn status_code(&self) -> StatusCode {
|
||||
StatusCode::INTERNAL_SERVER_ERROR
|
||||
}
|
||||
|
||||
/// Create response for error
|
||||
///
|
||||
/// Internal server error is generated by default.
|
||||
fn error_response(&self) -> Response {
|
||||
Response::new(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
let mut resp = Response::new(self.status_code());
|
||||
let mut buf = BytesMut::new();
|
||||
let _ = write!(Writer(&mut buf), "{}", self);
|
||||
resp.headers_mut().insert(
|
||||
header::CONTENT_TYPE,
|
||||
header::HeaderValue::from_static("text/plain; charset=utf-8"),
|
||||
);
|
||||
resp.set_body(Body::from(buf))
|
||||
}
|
||||
|
||||
downcast_get_type_id!();
|
||||
}
|
||||
|
||||
downcast!(ResponseError);
|
||||
|
||||
impl fmt::Display for Error {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
fmt::Display::fmt(&self.cause, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for Error {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
writeln!(f, "{:?}", &self.cause)
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{:?}", &self.cause)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for Error {
|
||||
fn cause(&self) -> Option<&dyn std::error::Error> {
|
||||
None
|
||||
}
|
||||
|
||||
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
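With the trait reshaped around `status_code`, a custom error type only needs `Debug + Display` plus an optional status override; the default `error_response` then renders the `Display` text into a `text/plain` body. A sketch, assuming the usual `actix_http` paths (`Response`, `error::ResponseError`, `http::StatusCode`):

```rust
use std::fmt;

use actix_http::error::ResponseError;
use actix_http::http::StatusCode;
use actix_http::Response;

#[derive(Debug)]
struct PayloadTooBig;

impl fmt::Display for PayloadTooBig {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("payload exceeds the configured limit")
    }
}

impl ResponseError for PayloadTooBig {
    // Override the default 500; the default `error_response` picks it up.
    fn status_code(&self) -> StatusCode {
        StatusCode::PAYLOAD_TOO_LARGE
    }
}

fn main() {
    let resp: Response = PayloadTooBig.error_response();
    assert_eq!(resp.status(), StatusCode::PAYLOAD_TOO_LARGE);
}
```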
@@ -77,17 +115,11 @@ impl From<()> for Error {
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for Error {
|
||||
fn description(&self) -> &str {
|
||||
"actix-http::Error"
|
||||
}
|
||||
|
||||
fn cause(&self) -> Option<&dyn std::error::Error> {
|
||||
None
|
||||
}
|
||||
|
||||
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
|
||||
None
|
||||
impl From<std::convert::Infallible> for Error {
|
||||
fn from(_: std::convert::Infallible) -> Self {
|
||||
// `std::convert::Infallible` indicates an error
|
||||
// that will never happen
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -107,12 +139,26 @@ impl<T: ResponseError + 'static> From<T> for Error {
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert Response to a Error
|
||||
impl From<Response> for Error {
|
||||
fn from(res: Response) -> Error {
|
||||
InternalError::from_response("", res).into()
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert ResponseBuilder to a Error
|
||||
impl From<ResponseBuilder> for Error {
|
||||
fn from(mut res: ResponseBuilder) -> Error {
|
||||
InternalError::from_response("", res.finish()).into()
|
||||
}
|
||||
}
|
||||
|
||||
/// Return `GATEWAY_TIMEOUT` for `TimeoutError`
|
||||
impl<E: ResponseError> ResponseError for TimeoutError<E> {
|
||||
fn error_response(&self) -> Response {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self {
|
||||
TimeoutError::Service(e) => e.error_response(),
|
||||
TimeoutError::Timeout => Response::new(StatusCode::GATEWAY_TIMEOUT),
|
||||
TimeoutError::Service(e) => e.status_code(),
|
||||
TimeoutError::Timeout => StatusCode::GATEWAY_TIMEOUT,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -121,7 +167,7 @@ impl<E: ResponseError> ResponseError for TimeoutError<E> {
|
||||
#[display(fmt = "UnknownError")]
|
||||
struct UnitError;
|
||||
|
||||
/// `InternalServerError` for `JsonError`
|
||||
/// `InternalServerError` for `UnitError`
|
||||
impl ResponseError for UnitError {}
|
||||
|
||||
/// `InternalServerError` for `JsonError`
|
||||
@@ -130,27 +176,31 @@ impl ResponseError for JsonError {}
|
||||
/// `InternalServerError` for `FormError`
|
||||
impl ResponseError for FormError {}
|
||||
|
||||
/// `InternalServerError` for `TimerError`
|
||||
impl ResponseError for TimerError {}
|
||||
#[cfg(feature = "openssl")]
|
||||
/// `InternalServerError` for `openssl::ssl::Error`
|
||||
impl ResponseError for actix_connect::ssl::openssl::SslError {}
|
||||
|
||||
#[cfg(feature = "ssl")]
|
||||
/// `InternalServerError` for `SslError`
|
||||
impl ResponseError for openssl::ssl::Error {}
|
||||
#[cfg(feature = "openssl")]
|
||||
/// `InternalServerError` for `openssl::ssl::HandshakeError`
|
||||
impl<T: std::fmt::Debug> ResponseError for actix_tls::openssl::HandshakeError<T> {}
|
||||
|
||||
/// Return `BAD_REQUEST` for `de::value::Error`
|
||||
impl ResponseError for DeError {
|
||||
fn error_response(&self) -> Response {
|
||||
Response::new(StatusCode::BAD_REQUEST)
|
||||
fn status_code(&self) -> StatusCode {
|
||||
StatusCode::BAD_REQUEST
|
||||
}
|
||||
}
|
||||
|
||||
/// `InternalServerError` for `Canceled`
|
||||
impl ResponseError for Canceled {}
|
||||
|
||||
/// `InternalServerError` for `BlockingError`
|
||||
impl<E: fmt::Debug> ResponseError for BlockingError<E> {}
|
||||
|
||||
/// Return `BAD_REQUEST` for `Utf8Error`
|
||||
impl ResponseError for Utf8Error {
|
||||
fn error_response(&self) -> Response {
|
||||
Response::new(StatusCode::BAD_REQUEST)
|
||||
fn status_code(&self) -> StatusCode {
|
||||
StatusCode::BAD_REQUEST
|
||||
}
|
||||
}
|
||||
|
||||
@@ -160,32 +210,22 @@ impl ResponseError for HttpError {}
|
||||
|
||||
/// Return `InternalServerError` for `io::Error`
|
||||
impl ResponseError for io::Error {
|
||||
fn error_response(&self) -> Response {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self.kind() {
|
||||
io::ErrorKind::NotFound => Response::new(StatusCode::NOT_FOUND),
|
||||
io::ErrorKind::PermissionDenied => Response::new(StatusCode::FORBIDDEN),
|
||||
_ => Response::new(StatusCode::INTERNAL_SERVER_ERROR),
|
||||
io::ErrorKind::NotFound => StatusCode::NOT_FOUND,
|
||||
io::ErrorKind::PermissionDenied => StatusCode::FORBIDDEN,
|
||||
_ => StatusCode::INTERNAL_SERVER_ERROR,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// `BadRequest` for `InvalidHeaderValue`
|
||||
impl ResponseError for header::InvalidHeaderValue {
|
||||
fn error_response(&self) -> Response {
|
||||
Response::new(StatusCode::BAD_REQUEST)
|
||||
fn status_code(&self) -> StatusCode {
|
||||
StatusCode::BAD_REQUEST
|
||||
}
|
||||
}
|
||||
|
||||
/// `BadRequest` for `InvalidHeaderValue`
|
||||
impl ResponseError for header::InvalidHeaderValueBytes {
|
||||
fn error_response(&self) -> Response {
|
||||
Response::new(StatusCode::BAD_REQUEST)
|
||||
}
|
||||
}
|
||||
|
||||
/// `InternalServerError` for `futures::Canceled`
|
||||
impl ResponseError for Canceled {}
|
||||
|
||||
/// A set of errors that can occur during parsing HTTP streams
|
||||
#[derive(Debug, Display)]
|
||||
pub enum ParseError {
|
||||
@@ -225,8 +265,8 @@ pub enum ParseError {
|
||||
|
||||
/// Return `BadRequest` for `ParseError`
|
||||
impl ResponseError for ParseError {
|
||||
fn error_response(&self) -> Response {
|
||||
Response::new(StatusCode::BAD_REQUEST)
|
||||
fn status_code(&self) -> StatusCode {
|
||||
StatusCode::BAD_REQUEST
|
||||
}
|
||||
}
|
||||
|
||||
@@ -294,6 +334,8 @@ pub enum PayloadError {
|
||||
Io(io::Error),
|
||||
}
|
||||
|
||||
impl std::error::Error for PayloadError {}
|
||||
|
||||
impl From<h2::Error> for PayloadError {
|
||||
fn from(err: h2::Error) -> Self {
|
||||
PayloadError::Http2Payload(err)
|
||||
@@ -318,7 +360,7 @@ impl From<BlockingError<io::Error>> for PayloadError {
|
||||
BlockingError::Error(e) => PayloadError::Io(e),
|
||||
BlockingError::Canceled => PayloadError::Io(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
"Thread pool is gone",
|
||||
"Operation is canceled",
|
||||
)),
|
||||
}
|
||||
}
|
||||
@@ -329,18 +371,18 @@ impl From<BlockingError<io::Error>> for PayloadError {
|
||||
/// - `Overflow` returns `PayloadTooLarge`
|
||||
/// - Other errors return `BadRequest`
|
||||
impl ResponseError for PayloadError {
|
||||
fn error_response(&self) -> Response {
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match *self {
|
||||
PayloadError::Overflow => Response::new(StatusCode::PAYLOAD_TOO_LARGE),
|
||||
_ => Response::new(StatusCode::BAD_REQUEST),
|
||||
PayloadError::Overflow => StatusCode::PAYLOAD_TOO_LARGE,
|
||||
_ => StatusCode::BAD_REQUEST,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Return `BadRequest` for `cookie::ParseError`
|
||||
impl ResponseError for crate::cookie::ParseError {
|
||||
fn error_response(&self) -> Response {
|
||||
Response::new(StatusCode::BAD_REQUEST)
|
||||
fn status_code(&self) -> StatusCode {
|
||||
StatusCode::BAD_REQUEST
|
||||
}
|
||||
}
|
||||
|
||||
@@ -350,6 +392,9 @@ pub enum DispatchError {
|
||||
/// Service error
|
||||
Service(Error),
|
||||
|
||||
/// Upgrade service error
|
||||
Upgrade,
|
||||
|
||||
/// An `io::Error` that occurred while trying to read or write to a network
|
||||
/// stream.
|
||||
#[display(fmt = "IO error: {}", _0)]
|
||||
@@ -388,7 +433,7 @@ pub enum DispatchError {
|
||||
Unknown,
|
||||
}
|
||||
|
||||
/// A set of error that can occure during parsing content type
|
||||
/// A set of error that can occur during parsing content type
|
||||
#[derive(PartialEq, Debug, Display)]
|
||||
pub enum ContentTypeError {
|
||||
/// Can not parse content type
|
||||
@@ -399,13 +444,23 @@ pub enum ContentTypeError {
|
||||
UnknownEncoding,
|
||||
}
|
||||
|
||||
impl std::error::Error for ContentTypeError {}
|
||||
|
||||
/// Return `BadRequest` for `ContentTypeError`
|
||||
impl ResponseError for ContentTypeError {
|
||||
fn error_response(&self) -> Response {
|
||||
Response::new(StatusCode::BAD_REQUEST)
|
||||
fn status_code(&self) -> StatusCode {
|
||||
StatusCode::BAD_REQUEST
|
||||
}
|
||||
}
|
||||
|
||||
impl<E, U: Encoder<I> + Decoder, I> ResponseError for FramedDispatcherError<E, U, I>
|
||||
where
|
||||
E: fmt::Debug + fmt::Display,
|
||||
<U as Encoder<I>>::Error: fmt::Debug,
|
||||
<U as Decoder>::Error: fmt::Debug,
|
||||
{
|
||||
}
|
||||
|
||||
/// Helper type that can wrap any error and generate custom response.
|
||||
///
|
||||
/// In the following example any `io::Error` will be converted into "BAD REQUEST"
|
||||
@@ -413,14 +468,12 @@ impl ResponseError for ContentTypeError {
|
||||
/// default.
|
||||
///
|
||||
/// ```rust
|
||||
/// # extern crate actix_http;
|
||||
/// # use std::io;
|
||||
/// # use actix_http::*;
|
||||
///
|
||||
/// fn index(req: Request) -> Result<&'static str> {
|
||||
/// Err(error::ErrorBadRequest(io::Error::new(io::ErrorKind::Other, "error")))
|
||||
/// }
|
||||
/// # fn main() {}
|
||||
/// ```
|
||||
pub struct InternalError<T> {
|
||||
cause: T,
|
||||
@@ -454,7 +507,7 @@ impl<T> fmt::Debug for InternalError<T>
|
||||
where
|
||||
T: fmt::Debug + 'static,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
fmt::Debug::fmt(&self.cause, f)
|
||||
}
|
||||
}
|
||||
@@ -463,7 +516,7 @@ impl<T> fmt::Display for InternalError<T>
|
||||
where
|
||||
T: fmt::Display + 'static,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
fmt::Display::fmt(&self.cause, f)
|
||||
}
|
||||
}
|
||||
@@ -472,9 +525,31 @@ impl<T> ResponseError for InternalError<T>
|
||||
where
|
||||
T: fmt::Debug + fmt::Display + 'static,
|
||||
{
|
||||
fn status_code(&self) -> StatusCode {
|
||||
match self.status {
|
||||
InternalErrorType::Status(st) => st,
|
||||
InternalErrorType::Response(ref resp) => {
|
||||
if let Some(resp) = resp.borrow().as_ref() {
|
||||
resp.head().status
|
||||
} else {
|
||||
StatusCode::INTERNAL_SERVER_ERROR
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn error_response(&self) -> Response {
|
||||
match self.status {
|
||||
InternalErrorType::Status(st) => Response::new(st),
|
||||
InternalErrorType::Status(st) => {
|
||||
let mut res = Response::new(st);
|
||||
let mut buf = BytesMut::new();
|
||||
let _ = write!(Writer(&mut buf), "{}", self);
|
||||
res.headers_mut().insert(
|
||||
header::CONTENT_TYPE,
|
||||
header::HeaderValue::from_static("text/plain; charset=utf-8"),
|
||||
);
|
||||
res.set_body(Body::from(buf))
|
||||
}
|
||||
InternalErrorType::Response(ref resp) => {
|
||||
if let Some(resp) = resp.borrow_mut().take() {
|
||||
resp
|
||||
@@ -486,13 +561,6 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert Response to a Error
|
||||
impl From<Response> for Error {
|
||||
fn from(res: Response) -> Error {
|
||||
InternalError::from_response("", res).into()
|
||||
}
|
||||
}
|
||||
|
||||
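The `ErrorXxx` helper functions documented below wrap any `Debug + Display` error in an `InternalError` carrying a fixed status code. A usage sketch, again assuming the `actix_http::error` paths used elsewhere in this file:

```rust
use std::io;

use actix_http::error::{Error, ErrorBadRequest};
use actix_http::http::StatusCode;
use actix_http::Response;

fn main() {
    // Wrap an arbitrary error with a fixed 400 status.
    let err: Error = ErrorBadRequest(io::Error::new(io::ErrorKind::Other, "bad input"));

    // The generated response keeps that status and renders the Display text.
    let resp: Response = err.as_response_error().error_response();
    assert_eq!(resp.status(), StatusCode::BAD_REQUEST);
}
```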
/// Helper function that creates wrapper of any error and generate *BAD
|
||||
/// REQUEST* response.
|
||||
#[allow(non_snake_case)]
|
||||
@@ -883,24 +951,20 @@ where
|
||||
InternalError::new(err, StatusCode::NETWORK_AUTHENTICATION_REQUIRED).into()
|
||||
}
|
||||
|
||||
#[cfg(feature = "fail")]
|
||||
mod failure_integration {
|
||||
use super::*;
|
||||
#[cfg(feature = "actors")]
|
||||
/// `InternalServerError` for `actix::MailboxError`
|
||||
/// This is supported on feature=`actors` only
|
||||
impl ResponseError for actix::MailboxError {}
|
||||
|
||||
/// Compatibility for `failure::Error`
|
||||
impl ResponseError for failure::Error {
|
||||
fn error_response(&self) -> Response {
|
||||
Response::new(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
}
|
||||
}
|
||||
}
|
||||
#[cfg(feature = "actors")]
|
||||
/// `InternalServerError` for `actix::ResolverError`
|
||||
/// This is supported on feature=`actors` only
|
||||
impl ResponseError for actix::actors::resolver::ResolverError {}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use http::{Error as HttpError, StatusCode};
|
||||
use httparse;
|
||||
use std::error::Error as StdError;
|
||||
use std::io;
|
||||
|
||||
#[test]
|
||||
@@ -929,7 +993,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_error_cause() {
|
||||
let orig = io::Error::new(io::ErrorKind::Other, "other");
|
||||
let desc = orig.description().to_owned();
|
||||
let desc = orig.to_string();
|
||||
let e = Error::from(orig);
|
||||
assert_eq!(format!("{}", e.as_response_error()), desc);
|
||||
}
|
||||
@@ -937,7 +1001,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_error_display() {
|
||||
let orig = io::Error::new(io::ErrorKind::Other, "other");
|
||||
let desc = orig.description().to_owned();
|
||||
let desc = orig.to_string();
|
||||
let e = Error::from(orig);
|
||||
assert_eq!(format!("{}", e), desc);
|
||||
}
|
||||
@@ -979,7 +1043,7 @@ mod tests {
|
||||
match ParseError::from($from) {
|
||||
e @ $error => {
|
||||
let desc = format!("{}", e);
|
||||
assert_eq!(desc, format!("IO error: {}", $from.description()));
|
||||
assert_eq!(desc, format!("IO error: {}", $from));
|
||||
}
|
||||
_ => unreachable!("{:?}", $from),
|
||||
}
|
||||
@@ -1007,6 +1071,16 @@ mod tests {
|
||||
assert_eq!(resp.status(), StatusCode::OK);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_casting() {
|
||||
let err = PayloadError::Overflow;
|
||||
let resp_err: &dyn ResponseError = &err;
|
||||
let err = resp_err.downcast_ref::<PayloadError>().unwrap();
|
||||
assert_eq!(err.to_string(), "A payload reached size limit.");
|
||||
let not_err = resp_err.downcast_ref::<ContentTypeError>();
|
||||
assert!(not_err.is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_helpers() {
|
||||
let r: Response = ErrorBadRequest("err").into();
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
use std::any::{Any, TypeId};
|
||||
use std::fmt;
|
||||
use std::{fmt, mem};
|
||||
|
||||
use hashbrown::HashMap;
|
||||
use fxhash::FxHashMap;
|
||||
|
||||
#[derive(Default)]
|
||||
/// A type map of request extensions.
|
||||
pub struct Extensions {
|
||||
map: HashMap<TypeId, Box<Any>>,
|
||||
/// Use FxHasher with a std HashMap for faster
|
||||
/// lookups on the small `TypeId` (u64 equivalent) keys.
|
||||
map: FxHashMap<TypeId, Box<dyn Any>>,
|
||||
}
|
||||
|
||||
impl Extensions {
|
||||
@@ -14,7 +16,7 @@ impl Extensions {
|
||||
#[inline]
|
||||
pub fn new() -> Extensions {
|
||||
Extensions {
|
||||
map: HashMap::default(),
|
||||
map: FxHashMap::default(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -28,33 +30,30 @@ impl Extensions {
|
||||
|
||||
/// Check if container contains entry
|
||||
pub fn contains<T: 'static>(&self) -> bool {
|
||||
self.map.get(&TypeId::of::<T>()).is_some()
|
||||
self.map.contains_key(&TypeId::of::<T>())
|
||||
}
|
||||
|
||||
/// Get a reference to a type previously inserted on this `Extensions`.
|
||||
pub fn get<T: 'static>(&self) -> Option<&T> {
|
||||
self.map
|
||||
.get(&TypeId::of::<T>())
|
||||
.and_then(|boxed| (&**boxed as &(Any + 'static)).downcast_ref())
|
||||
.and_then(|boxed| boxed.downcast_ref())
|
||||
}
|
||||
|
||||
/// Get a mutable reference to a type previously inserted on this `Extensions`.
|
||||
pub fn get_mut<T: 'static>(&mut self) -> Option<&mut T> {
|
||||
self.map
|
||||
.get_mut(&TypeId::of::<T>())
|
||||
.and_then(|boxed| (&mut **boxed as &mut (Any + 'static)).downcast_mut())
|
||||
.and_then(|boxed| boxed.downcast_mut())
|
||||
}
|
||||
|
||||
/// Remove a type from this `Extensions`.
|
||||
///
|
||||
/// If an extension of this type existed, it will be returned.
|
||||
pub fn remove<T: 'static>(&mut self) -> Option<T> {
|
||||
self.map.remove(&TypeId::of::<T>()).and_then(|boxed| {
|
||||
(boxed as Box<Any + 'static>)
|
||||
.downcast()
|
||||
.ok()
|
||||
.map(|boxed| *boxed)
|
||||
})
|
||||
self.map
|
||||
.remove(&TypeId::of::<T>())
|
||||
.and_then(|boxed| boxed.downcast().ok().map(|boxed| *boxed))
|
||||
}
|
||||
|
||||
/// Clear the `Extensions` of all inserted extensions.
|
||||
@@ -62,30 +61,184 @@ impl Extensions {
|
||||
pub fn clear(&mut self) {
|
||||
self.map.clear();
|
||||
}
|
||||
|
||||
/// Extends self with the items from another `Extensions`.
|
||||
pub fn extend(&mut self, other: Extensions) {
|
||||
self.map.extend(other.map);
|
||||
}
|
||||
|
||||
/// Sets (or overrides) items from `other` into this map.
|
||||
pub(crate) fn drain_from(&mut self, other: &mut Self) {
|
||||
self.map.extend(mem::take(&mut other.map));
|
||||
}
|
||||
}
|
||||
|
||||
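`Extensions` is a type map: at most one value per concrete type, keyed by `TypeId` and recovered by downcasting, now stored in an Fx-hashed std `HashMap` instead of `hashbrown`. The core idea in a stripped-down, std-only form:

```rust
use std::any::{Any, TypeId};
use std::collections::HashMap;

/// Minimal type map: one value per concrete type, looked up by `TypeId`.
#[derive(Default)]
struct TypeMap {
    map: HashMap<TypeId, Box<dyn Any>>,
}

impl TypeMap {
    fn insert<T: 'static>(&mut self, val: T) {
        self.map.insert(TypeId::of::<T>(), Box::new(val));
    }

    fn get<T: 'static>(&self) -> Option<&T> {
        self.map
            .get(&TypeId::of::<T>())
            .and_then(|boxed| boxed.downcast_ref())
    }

    fn remove<T: 'static>(&mut self) -> Option<T> {
        self.map
            .remove(&TypeId::of::<T>())
            .and_then(|boxed| boxed.downcast().ok().map(|boxed| *boxed))
    }
}

fn main() {
    let mut map = TypeMap::default();
    map.insert(5i32);
    map.insert("label");
    assert_eq!(map.get::<i32>(), Some(&5));
    assert_eq!(map.remove::<i32>(), Some(5));
    assert!(map.get::<i32>().is_none());
}
```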
impl fmt::Debug for Extensions {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("Extensions").finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extensions() {
|
||||
#[derive(Debug, PartialEq)]
|
||||
struct MyType(i32);
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
let mut extensions = Extensions::new();
|
||||
#[test]
|
||||
fn test_remove() {
|
||||
let mut map = Extensions::new();
|
||||
|
||||
extensions.insert(5i32);
|
||||
extensions.insert(MyType(10));
|
||||
map.insert::<i8>(123);
|
||||
assert!(map.get::<i8>().is_some());
|
||||
|
||||
assert_eq!(extensions.get(), Some(&5i32));
|
||||
assert_eq!(extensions.get_mut(), Some(&mut 5i32));
|
||||
map.remove::<i8>();
|
||||
assert!(map.get::<i8>().is_none());
|
||||
}
|
||||
|
||||
assert_eq!(extensions.remove::<i32>(), Some(5i32));
|
||||
assert!(extensions.get::<i32>().is_none());
|
||||
#[test]
|
||||
fn test_clear() {
|
||||
let mut map = Extensions::new();
|
||||
|
||||
assert_eq!(extensions.get::<bool>(), None);
|
||||
assert_eq!(extensions.get(), Some(&MyType(10)));
|
||||
map.insert::<i8>(8);
|
||||
map.insert::<i16>(16);
|
||||
map.insert::<i32>(32);
|
||||
|
||||
assert!(map.contains::<i8>());
|
||||
assert!(map.contains::<i16>());
|
||||
assert!(map.contains::<i32>());
|
||||
|
||||
map.clear();
|
||||
|
||||
assert!(!map.contains::<i8>());
|
||||
assert!(!map.contains::<i16>());
|
||||
assert!(!map.contains::<i32>());
|
||||
|
||||
map.insert::<i8>(10);
|
||||
assert_eq!(*map.get::<i8>().unwrap(), 10);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_integers() {
|
||||
let mut map = Extensions::new();
|
||||
|
||||
map.insert::<i8>(8);
|
||||
map.insert::<i16>(16);
|
||||
map.insert::<i32>(32);
|
||||
map.insert::<i64>(64);
|
||||
map.insert::<i128>(128);
|
||||
map.insert::<u8>(8);
|
||||
map.insert::<u16>(16);
|
||||
map.insert::<u32>(32);
|
||||
map.insert::<u64>(64);
|
||||
map.insert::<u128>(128);
|
||||
assert!(map.get::<i8>().is_some());
|
||||
assert!(map.get::<i16>().is_some());
|
||||
assert!(map.get::<i32>().is_some());
|
||||
assert!(map.get::<i64>().is_some());
|
||||
assert!(map.get::<i128>().is_some());
|
||||
assert!(map.get::<u8>().is_some());
|
||||
assert!(map.get::<u16>().is_some());
|
||||
assert!(map.get::<u32>().is_some());
|
||||
assert!(map.get::<u64>().is_some());
|
||||
assert!(map.get::<u128>().is_some());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_composition() {
|
||||
struct Magi<T>(pub T);
|
||||
|
||||
struct Madoka {
|
||||
pub god: bool,
|
||||
}
|
||||
|
||||
struct Homura {
|
||||
pub attempts: usize,
|
||||
}
|
||||
|
||||
struct Mami {
|
||||
pub guns: usize,
|
||||
}
|
||||
|
||||
let mut map = Extensions::new();
|
||||
|
||||
map.insert(Magi(Madoka { god: false }));
|
||||
map.insert(Magi(Homura { attempts: 0 }));
|
||||
map.insert(Magi(Mami { guns: 999 }));
|
||||
|
||||
assert!(!map.get::<Magi<Madoka>>().unwrap().0.god);
|
||||
assert_eq!(0, map.get::<Magi<Homura>>().unwrap().0.attempts);
|
||||
assert_eq!(999, map.get::<Magi<Mami>>().unwrap().0.guns);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extensions() {
|
||||
#[derive(Debug, PartialEq)]
|
||||
struct MyType(i32);
|
||||
|
||||
let mut extensions = Extensions::new();
|
||||
|
||||
extensions.insert(5i32);
|
||||
extensions.insert(MyType(10));
|
||||
|
||||
assert_eq!(extensions.get(), Some(&5i32));
|
||||
assert_eq!(extensions.get_mut(), Some(&mut 5i32));
|
||||
|
||||
assert_eq!(extensions.remove::<i32>(), Some(5i32));
|
||||
assert!(extensions.get::<i32>().is_none());
|
||||
|
||||
assert_eq!(extensions.get::<bool>(), None);
|
||||
assert_eq!(extensions.get(), Some(&MyType(10)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extend() {
|
||||
#[derive(Debug, PartialEq)]
|
||||
struct MyType(i32);
|
||||
|
||||
let mut extensions = Extensions::new();
|
||||
|
||||
extensions.insert(5i32);
|
||||
extensions.insert(MyType(10));
|
||||
|
||||
let mut other = Extensions::new();
|
||||
|
||||
other.insert(15i32);
|
||||
other.insert(20u8);
|
||||
|
||||
extensions.extend(other);
|
||||
|
||||
assert_eq!(extensions.get(), Some(&15i32));
|
||||
assert_eq!(extensions.get_mut(), Some(&mut 15i32));
|
||||
|
||||
assert_eq!(extensions.remove::<i32>(), Some(15i32));
|
||||
assert!(extensions.get::<i32>().is_none());
|
||||
|
||||
assert_eq!(extensions.get::<bool>(), None);
|
||||
assert_eq!(extensions.get(), Some(&MyType(10)));
|
||||
|
||||
assert_eq!(extensions.get(), Some(&20u8));
|
||||
assert_eq!(extensions.get_mut(), Some(&mut 20u8));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_drain_from() {
|
||||
let mut ext = Extensions::new();
|
||||
ext.insert(2isize);
|
||||
|
||||
let mut more_ext = Extensions::new();
|
||||
|
||||
more_ext.insert(5isize);
|
||||
more_ext.insert(5usize);
|
||||
|
||||
assert_eq!(ext.get::<isize>(), Some(&2isize));
|
||||
assert_eq!(ext.get::<usize>(), None);
|
||||
assert_eq!(more_ext.get::<isize>(), Some(&5isize));
|
||||
assert_eq!(more_ext.get::<usize>(), Some(&5usize));
|
||||
|
||||
ext.drain_from(&mut more_ext);
|
||||
|
||||
assert_eq!(ext.get::<isize>(), Some(&5isize));
|
||||
assert_eq!(ext.get::<usize>(), Some(&5usize));
|
||||
assert_eq!(more_ext.get::<isize>(), None);
|
||||
assert_eq!(more_ext.get::<usize>(), None);
|
||||
}
|
||||
}
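The tests above exercise actix's `Extensions` container, which behaves like a map keyed by type: at most one value per concrete type, looked up via turbofish. The diff only shows its `Debug` impl and tests, so as a rough, hedged sketch of how such a type-keyed map can be built on std alone (names here are illustrative, not the actual actix-http implementation):

```rust
use std::any::{Any, TypeId};
use std::collections::HashMap;

// Minimal type-keyed map: stores at most one value per concrete type.
#[derive(Default)]
struct TypeMap {
    map: HashMap<TypeId, Box<dyn Any>>,
}

impl TypeMap {
    fn insert<T: 'static>(&mut self, val: T) {
        self.map.insert(TypeId::of::<T>(), Box::new(val));
    }

    fn get<T: 'static>(&self) -> Option<&T> {
        self.map
            .get(&TypeId::of::<T>())
            .and_then(|boxed| boxed.downcast_ref::<T>())
    }

    fn remove<T: 'static>(&mut self) -> Option<T> {
        self.map
            .remove(&TypeId::of::<T>())
            .and_then(|boxed| boxed.downcast::<T>().ok())
            .map(|boxed| *boxed)
    }
}

fn main() {
    let mut ext = TypeMap::default();
    ext.insert(5i32);
    assert_eq!(ext.get::<i32>(), Some(&5));
    assert_eq!(ext.remove::<i32>(), Some(5));
    assert!(ext.get::<i32>().is_none());
}
```

The real type additionally offers `get_mut`, `clear`, `extend`, and `drain_from`, as the tests above show; the sketch only covers the core insert/get/remove shape.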
|
||||
|
||||
@@ -1,12 +1,8 @@
|
||||
#![allow(unused_imports, unused_variables, dead_code)]
|
||||
use std::io::{self, Write};
|
||||
use std::io;
|
||||
|
||||
use actix_codec::{Decoder, Encoder};
|
||||
use bitflags::bitflags;
|
||||
use bytes::{BufMut, Bytes, BytesMut};
|
||||
use http::header::{
|
||||
HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING, UPGRADE,
|
||||
};
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use http::{Method, Version};
|
||||
|
||||
use super::decoder::{PayloadDecoder, PayloadItem, PayloadType};
|
||||
@@ -15,8 +11,7 @@ use super::{Message, MessageType};
|
||||
use crate::body::BodySize;
|
||||
use crate::config::ServiceConfig;
|
||||
use crate::error::{ParseError, PayloadError};
|
||||
use crate::helpers;
|
||||
use crate::message::{ConnectionType, Head, MessagePool, RequestHead, ResponseHead};
|
||||
use crate::message::{ConnectionType, RequestHeadType, ResponseHead};
|
||||
|
||||
bitflags! {
|
||||
struct Flags: u8 {
|
||||
@@ -26,8 +21,6 @@ bitflags! {
|
||||
}
|
||||
}
|
||||
|
||||
const AVERAGE_HEADER_SIZE: usize = 30;
|
||||
|
||||
/// HTTP/1 Codec
|
||||
pub struct ClientCodec {
|
||||
inner: ClientCodecInner,
|
||||
@@ -47,8 +40,7 @@ struct ClientCodecInner {
|
||||
|
||||
// encoder part
|
||||
flags: Flags,
|
||||
headers_size: u32,
|
||||
encoder: encoder::MessageEncoder<RequestHead>,
|
||||
encoder: encoder::MessageEncoder<RequestHeadType>,
|
||||
}
|
||||
|
||||
impl Default for ClientCodec {
|
||||
@@ -76,7 +68,6 @@ impl ClientCodec {
|
||||
ctype: ConnectionType::Close,
|
||||
|
||||
flags,
|
||||
headers_size: 0,
|
||||
encoder: encoder::MessageEncoder::default(),
|
||||
},
|
||||
}
|
||||
@@ -182,23 +173,24 @@ impl Decoder for ClientPayloadCodec {
|
||||
}
|
||||
}
|
||||
|
||||
impl Encoder for ClientCodec {
|
||||
type Item = Message<(RequestHead, BodySize)>;
|
||||
impl Encoder<Message<(RequestHeadType, BodySize)>> for ClientCodec {
|
||||
type Error = io::Error;
|
||||
|
||||
fn encode(
|
||||
&mut self,
|
||||
item: Self::Item,
|
||||
item: Message<(RequestHeadType, BodySize)>,
|
||||
dst: &mut BytesMut,
|
||||
) -> Result<(), Self::Error> {
|
||||
match item {
|
||||
Message::Item((mut msg, length)) => {
|
||||
Message::Item((mut head, length)) => {
|
||||
let inner = &mut self.inner;
|
||||
inner.version = msg.version;
|
||||
inner.flags.set(Flags::HEAD, msg.method == Method::HEAD);
|
||||
inner.version = head.as_ref().version;
|
||||
inner
|
||||
.flags
|
||||
.set(Flags::HEAD, head.as_ref().method == Method::HEAD);
|
||||
|
||||
// connection status
|
||||
inner.ctype = match msg.connection_type() {
|
||||
inner.ctype = match head.as_ref().connection_type() {
|
||||
ConnectionType::KeepAlive => {
|
||||
if inner.flags.contains(Flags::KEEPALIVE_ENABLED) {
|
||||
ConnectionType::KeepAlive
|
||||
@@ -212,7 +204,7 @@ impl Encoder for ClientCodec {
|
||||
|
||||
inner.encoder.encode(
|
||||
dst,
|
||||
&mut msg,
|
||||
&mut head,
|
||||
false,
|
||||
false,
|
||||
inner.version,
|
||||
|
||||
@@ -1,12 +1,9 @@
|
||||
#![allow(unused_imports, unused_variables, dead_code)]
|
||||
use std::fmt;
|
||||
use std::io::{self, Write};
|
||||
use std::{fmt, io};
|
||||
|
||||
use actix_codec::{Decoder, Encoder};
|
||||
use bitflags::bitflags;
|
||||
use bytes::{BufMut, Bytes, BytesMut};
|
||||
use http::header::{HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING};
|
||||
use http::{Method, StatusCode, Version};
|
||||
use bytes::BytesMut;
|
||||
use http::{Method, Version};
|
||||
|
||||
use super::decoder::{PayloadDecoder, PayloadItem, PayloadType};
|
||||
use super::{decoder, encoder};
|
||||
@@ -14,8 +11,7 @@ use super::{Message, MessageType};
|
||||
use crate::body::BodySize;
|
||||
use crate::config::ServiceConfig;
|
||||
use crate::error::ParseError;
|
||||
use crate::helpers;
|
||||
use crate::message::{ConnectionType, Head, ResponseHead};
|
||||
use crate::message::ConnectionType;
|
||||
use crate::request::Request;
|
||||
use crate::response::Response;
|
||||
|
||||
@@ -27,11 +23,9 @@ bitflags! {
|
||||
}
|
||||
}
|
||||
|
||||
const AVERAGE_HEADER_SIZE: usize = 30;
|
||||
|
||||
/// HTTP/1 Codec
|
||||
pub struct Codec {
|
||||
pub(crate) config: ServiceConfig,
|
||||
config: ServiceConfig,
|
||||
decoder: decoder::MessageDecoder<Request>,
|
||||
payload: Option<PayloadDecoder>,
|
||||
version: Version,
|
||||
@@ -40,7 +34,6 @@ pub struct Codec {
|
||||
// encoder part
|
||||
flags: Flags,
|
||||
encoder: encoder::MessageEncoder<Response<()>>,
|
||||
// headers_size: u32,
|
||||
}
|
||||
|
||||
impl Default for Codec {
|
||||
@@ -50,7 +43,7 @@ impl Default for Codec {
|
||||
}
|
||||
|
||||
impl fmt::Debug for Codec {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "h1::Codec({:?})", self.flags)
|
||||
}
|
||||
}
|
||||
@@ -67,13 +60,11 @@ impl Codec {
|
||||
};
|
||||
Codec {
|
||||
config,
|
||||
flags,
|
||||
decoder: decoder::MessageDecoder::default(),
|
||||
payload: None,
|
||||
version: Version::HTTP_11,
|
||||
ctype: ConnectionType::Close,
|
||||
|
||||
flags,
|
||||
// headers_size: 0,
|
||||
encoder: encoder::MessageEncoder::default(),
|
||||
}
|
||||
}
|
||||
@@ -107,6 +98,11 @@ impl Codec {
|
||||
MessageType::Payload
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn config(&self) -> &ServiceConfig {
|
||||
&self.config
|
||||
}
|
||||
}
|
||||
|
||||
impl Decoder for Codec {
|
||||
@@ -148,13 +144,12 @@ impl Decoder for Codec {
|
||||
}
|
||||
}
|
||||
|
||||
impl Encoder for Codec {
|
||||
type Item = Message<(Response<()>, BodySize)>;
|
||||
impl Encoder<Message<(Response<()>, BodySize)>> for Codec {
|
||||
type Error = io::Error;
|
||||
|
||||
fn encode(
|
||||
&mut self,
|
||||
item: Self::Item,
|
||||
item: Message<(Response<()>, BodySize)>,
|
||||
dst: &mut BytesMut,
|
||||
) -> Result<(), Self::Error> {
|
||||
match item {
|
||||
@@ -174,7 +169,6 @@ impl Encoder for Codec {
|
||||
};
|
||||
|
||||
// encode message
|
||||
let len = dst.len();
|
||||
self.encoder.encode(
|
||||
dst,
|
||||
&mut res,
|
||||
@@ -200,17 +194,11 @@ impl Encoder for Codec {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::{cmp, io};
|
||||
|
||||
use actix_codec::{AsyncRead, AsyncWrite};
|
||||
use bytes::{Buf, Bytes, BytesMut};
|
||||
use http::{Method, Version};
|
||||
use bytes::BytesMut;
|
||||
use http::Method;
|
||||
|
||||
use super::*;
|
||||
use crate::error::ParseError;
|
||||
use crate::h1::Message;
|
||||
use crate::httpmessage::HttpMessage;
|
||||
use crate::request::Request;
|
||||
|
||||
#[test]
|
||||
fn test_http_request_chunked_payload_and_next_message() {
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
use std::convert::TryFrom;
|
||||
use std::io;
|
||||
use std::marker::PhantomData;
|
||||
use std::{io, mem};
|
||||
use std::task::Poll;
|
||||
|
||||
use actix_codec::Decoder;
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use futures::{Async, Poll};
|
||||
use bytes::{Buf, Bytes, BytesMut};
|
||||
use http::header::{HeaderName, HeaderValue};
|
||||
use http::{header, HttpTryFrom, Method, StatusCode, Uri, Version};
|
||||
use httparse;
|
||||
use http::{header, Method, StatusCode, Uri, Version};
|
||||
use log::{debug, error, trace};
|
||||
|
||||
use crate::error::ParseError;
|
||||
@@ -17,7 +17,7 @@ use crate::request::Request;
|
||||
const MAX_BUFFER_SIZE: usize = 131_072;
|
||||
const MAX_HEADERS: usize = 96;
|
||||
|
||||
/// Incoming messagd decoder
|
||||
/// Incoming message decoder
|
||||
pub(crate) struct MessageDecoder<T: MessageType>(PhantomData<T>);
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -45,7 +45,7 @@ impl<T: MessageType> Decoder for MessageDecoder<T> {
|
||||
|
||||
pub(crate) enum PayloadLength {
|
||||
Payload(PayloadType),
|
||||
Upgrade,
|
||||
UpgradeWebSocket,
|
||||
None,
|
||||
}
|
||||
|
||||
@@ -64,9 +64,10 @@ pub(crate) trait MessageType: Sized {
|
||||
raw_headers: &[HeaderIndex],
|
||||
) -> Result<PayloadLength, ParseError> {
|
||||
let mut ka = None;
|
||||
let mut has_upgrade = false;
|
||||
let mut has_upgrade_websocket = false;
|
||||
let mut expect = false;
|
||||
let mut chunked = false;
|
||||
let mut seen_te = false;
|
||||
let mut content_length = None;
|
||||
|
||||
{
|
||||
@@ -76,15 +77,26 @@ pub(crate) trait MessageType: Sized {
|
||||
let name =
|
||||
HeaderName::from_bytes(&slice[idx.name.0..idx.name.1]).unwrap();
|
||||
|
||||
// Unsafe: httparse check header value for valid utf-8
|
||||
// SAFETY: httparse already checks header value is only visible ASCII bytes
|
||||
// from_maybe_shared_unchecked contains debug assertions so they are omitted here
|
||||
let value = unsafe {
|
||||
HeaderValue::from_shared_unchecked(
|
||||
slice.slice(idx.value.0, idx.value.1),
|
||||
HeaderValue::from_maybe_shared_unchecked(
|
||||
slice.slice(idx.value.0..idx.value.1),
|
||||
)
|
||||
};
|
||||
|
||||
match name {
|
||||
header::CONTENT_LENGTH => {
|
||||
if let Ok(s) = value.to_str() {
|
||||
header::CONTENT_LENGTH if content_length.is_some() => {
|
||||
debug!("multiple Content-Length");
|
||||
return Err(ParseError::Header);
|
||||
}
|
||||
|
||||
header::CONTENT_LENGTH => match value.to_str() {
|
||||
Ok(s) if s.trim().starts_with("+") => {
|
||||
debug!("illegal Content-Length: {:?}", s);
|
||||
return Err(ParseError::Header);
|
||||
}
|
||||
Ok(s) => {
|
||||
if let Ok(len) = s.parse::<u64>() {
|
||||
if len != 0 {
|
||||
content_length = Some(len);
|
||||
@@ -93,15 +105,31 @@ pub(crate) trait MessageType: Sized {
|
||||
debug!("illegal Content-Length: {:?}", s);
|
||||
return Err(ParseError::Header);
|
||||
}
|
||||
} else {
|
||||
}
|
||||
Err(_) => {
|
||||
debug!("illegal Content-Length: {:?}", value);
|
||||
return Err(ParseError::Header);
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
// transfer-encoding
|
||||
header::TRANSFER_ENCODING if seen_te => {
|
||||
debug!("multiple Transfer-Encoding not allowed");
|
||||
return Err(ParseError::Header);
|
||||
}
|
||||
|
||||
header::TRANSFER_ENCODING => {
|
||||
seen_te = true;
|
||||
|
||||
if let Ok(s) = value.to_str().map(|s| s.trim()) {
|
||||
chunked = s.eq_ignore_ascii_case("chunked");
|
||||
if s.eq_ignore_ascii_case("chunked") {
|
||||
chunked = true;
|
||||
} else if s.eq_ignore_ascii_case("identity") {
|
||||
// allow silently since multiple TE headers are already checked
|
||||
} else {
|
||||
debug!("illegal Transfer-Encoding: {:?}", s);
|
||||
return Err(ParseError::Header);
|
||||
}
|
||||
} else {
|
||||
return Err(ParseError::Header);
|
||||
}
|
||||
@@ -123,12 +151,9 @@ pub(crate) trait MessageType: Sized {
|
||||
};
|
||||
}
|
||||
header::UPGRADE => {
|
||||
has_upgrade = true;
|
||||
// check content-length, some clients (dart)
|
||||
// sends "content-length: 0" with websocket upgrade
|
||||
if let Ok(val) = value.to_str().map(|val| val.trim()) {
|
||||
if val.eq_ignore_ascii_case("websocket") {
|
||||
content_length = None;
|
||||
has_upgrade_websocket = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -155,13 +180,13 @@ pub(crate) trait MessageType: Sized {
|
||||
Ok(PayloadLength::Payload(PayloadType::Payload(
|
||||
PayloadDecoder::chunked(),
|
||||
)))
|
||||
} else if has_upgrade_websocket {
|
||||
Ok(PayloadLength::UpgradeWebSocket)
|
||||
} else if let Some(len) = content_length {
|
||||
// Content-Length
|
||||
Ok(PayloadLength::Payload(PayloadType::Payload(
|
||||
PayloadDecoder::length(len),
|
||||
)))
|
||||
} else if has_upgrade {
|
||||
Ok(PayloadLength::Upgrade)
|
||||
} else {
|
||||
Ok(PayloadLength::None)
|
||||
}
|
||||
@@ -184,13 +209,10 @@ impl MessageType for Request {
|
||||
}
|
||||
|
||||
fn decode(src: &mut BytesMut) -> Result<Option<(Self, PayloadType)>, ParseError> {
|
||||
// Unsafe: we read only this data only after httparse parses headers into.
|
||||
// performance bump for pipeline benchmarks.
|
||||
let mut headers: [HeaderIndex; MAX_HEADERS] = unsafe { mem::uninitialized() };
|
||||
let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY;
|
||||
|
||||
let (len, method, uri, ver, h_len) = {
|
||||
let mut parsed: [httparse::Header; MAX_HEADERS] =
|
||||
unsafe { mem::uninitialized() };
|
||||
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = EMPTY_HEADER_ARRAY;
|
||||
|
||||
let mut req = httparse::Request::new(&mut parsed);
|
||||
match req.parse(src)? {
|
||||
@@ -219,7 +241,7 @@ impl MessageType for Request {
|
||||
// payload decoder
|
||||
let decoder = match length {
|
||||
PayloadLength::Payload(pl) => pl,
|
||||
PayloadLength::Upgrade => {
|
||||
PayloadLength::UpgradeWebSocket => {
|
||||
// upgrade(websocket)
|
||||
PayloadType::Stream(PayloadDecoder::eof())
|
||||
}
|
||||
@@ -258,13 +280,10 @@ impl MessageType for ResponseHead {
|
||||
}
|
||||
|
||||
fn decode(src: &mut BytesMut) -> Result<Option<(Self, PayloadType)>, ParseError> {
|
||||
// Unsafe: we read only this data only after httparse parses headers into.
|
||||
// performance bump for pipeline benchmarks.
|
||||
let mut headers: [HeaderIndex; MAX_HEADERS] = unsafe { mem::uninitialized() };
|
||||
let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY;
|
||||
|
||||
let (len, ver, status, h_len) = {
|
||||
let mut parsed: [httparse::Header; MAX_HEADERS] =
|
||||
unsafe { mem::uninitialized() };
|
||||
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = EMPTY_HEADER_ARRAY;
|
||||
|
||||
let mut res = httparse::Response::new(&mut parsed);
|
||||
match res.parse(src)? {
|
||||
@@ -300,7 +319,13 @@ impl MessageType for ResponseHead {
|
||||
error!("MAX_BUFFER_SIZE unprocessed data reached, closing");
|
||||
return Err(ParseError::TooLarge);
|
||||
} else {
|
||||
PayloadType::None
|
||||
// for HTTP/1.0 read to eof and close connection
|
||||
if msg.version == Version::HTTP_10 {
|
||||
msg.set_connection_type(ConnectionType::Close);
|
||||
PayloadType::Payload(PayloadDecoder::eof())
|
||||
} else {
|
||||
PayloadType::None
|
||||
}
|
||||
};
|
||||
|
||||
Ok(Some((msg, decoder)))
|
||||
@@ -313,10 +338,21 @@ pub(crate) struct HeaderIndex {
|
||||
pub(crate) value: (usize, usize),
|
||||
}
|
||||
|
||||
pub(crate) const EMPTY_HEADER_INDEX: HeaderIndex = HeaderIndex {
name: (0, 0),
value: (0, 0),
};

pub(crate) const EMPTY_HEADER_INDEX_ARRAY: [HeaderIndex; MAX_HEADERS] =
[EMPTY_HEADER_INDEX; MAX_HEADERS];

pub(crate) const EMPTY_HEADER_ARRAY: [httparse::Header<'static>; MAX_HEADERS] =
[httparse::EMPTY_HEADER; MAX_HEADERS];
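These constants replace the `mem::uninitialized()` scratch arrays the old decoder used: repeating a named empty value with `[CONST; N]` gives a fully initialized stack array with no unsafe code, at negligible cost. A small hedged illustration of the pattern (field names copied from the diff, everything else standalone):

```rust
#[derive(Debug, Clone, Copy)]
struct HeaderIndex {
    name: (usize, usize),
    value: (usize, usize),
}

const EMPTY_HEADER_INDEX: HeaderIndex = HeaderIndex {
    name: (0, 0),
    value: (0, 0),
};

const MAX_HEADERS: usize = 96;

fn main() {
    // The array repeat expression copies the constant into every slot,
    // so the whole array is initialized before first use.
    let headers: [HeaderIndex; MAX_HEADERS] = [EMPTY_HEADER_INDEX; MAX_HEADERS];
    assert_eq!(headers.len(), MAX_HEADERS);
    assert_eq!(headers[0].name, (0, 0));
}
```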
|
||||
|
||||
impl HeaderIndex {
|
||||
pub(crate) fn record(
|
||||
bytes: &[u8],
|
||||
headers: &[httparse::Header],
|
||||
headers: &[httparse::Header<'_>],
|
||||
indices: &mut [HeaderIndex],
|
||||
) {
|
||||
let bytes_ptr = bytes.as_ptr() as usize;
|
||||
@@ -331,7 +367,7 @@ impl HeaderIndex {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
/// Http payload item
|
||||
pub enum PayloadItem {
|
||||
Chunk(Bytes),
|
||||
@@ -419,7 +455,7 @@ impl Decoder for PayloadDecoder {
|
||||
let len = src.len() as u64;
|
||||
let buf;
|
||||
if *remaining > len {
|
||||
buf = src.take().freeze();
|
||||
buf = src.split().freeze();
|
||||
*remaining -= len;
|
||||
} else {
|
||||
buf = src.split_to(*remaining as usize).freeze();
|
||||
@@ -433,9 +469,10 @@ impl Decoder for PayloadDecoder {
|
||||
loop {
|
||||
let mut buf = None;
|
||||
// advances the chunked state
|
||||
*state = match state.step(src, size, &mut buf)? {
|
||||
Async::NotReady => return Ok(None),
|
||||
Async::Ready(state) => state,
|
||||
*state = match state.step(src, size, &mut buf) {
|
||||
Poll::Pending => return Ok(None),
|
||||
Poll::Ready(Ok(state)) => state,
|
||||
Poll::Ready(Err(e)) => return Err(e),
|
||||
};
|
||||
if *state == ChunkedState::End {
|
||||
trace!("End of chunked stream");
|
||||
@@ -453,7 +490,7 @@ impl Decoder for PayloadDecoder {
|
||||
if src.is_empty() {
|
||||
Ok(None)
|
||||
} else {
|
||||
Ok(Some(PayloadItem::Chunk(src.take().freeze())))
|
||||
Ok(Some(PayloadItem::Chunk(src.split().freeze())))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -464,10 +501,10 @@ macro_rules! byte (
|
||||
($rdr:ident) => ({
|
||||
if $rdr.len() > 0 {
|
||||
let b = $rdr[0];
|
||||
$rdr.split_to(1);
|
||||
$rdr.advance(1);
|
||||
b
|
||||
} else {
|
||||
return Ok(Async::NotReady)
|
||||
return Poll::Pending
|
||||
}
|
||||
})
|
||||
);
|
||||
@@ -478,7 +515,7 @@ impl ChunkedState {
|
||||
body: &mut BytesMut,
|
||||
size: &mut u64,
|
||||
buf: &mut Option<Bytes>,
|
||||
) -> Poll<ChunkedState, io::Error> {
|
||||
) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
use self::ChunkedState::*;
|
||||
match *self {
|
||||
Size => ChunkedState::read_size(body, size),
|
||||
@@ -490,66 +527,83 @@ impl ChunkedState {
|
||||
BodyLf => ChunkedState::read_body_lf(body),
|
||||
EndCr => ChunkedState::read_end_cr(body),
|
||||
EndLf => ChunkedState::read_end_lf(body),
|
||||
End => Ok(Async::Ready(ChunkedState::End)),
|
||||
End => Poll::Ready(Ok(ChunkedState::End)),
|
||||
}
|
||||
}
|
||||
fn read_size(rdr: &mut BytesMut, size: &mut u64) -> Poll<ChunkedState, io::Error> {
|
||||
|
||||
fn read_size(
|
||||
rdr: &mut BytesMut,
|
||||
size: &mut u64,
|
||||
) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
let radix = 16;
|
||||
match byte!(rdr) {
|
||||
b @ b'0'...b'9' => {
|
||||
*size *= radix;
|
||||
*size += u64::from(b - b'0');
|
||||
}
|
||||
b @ b'a'...b'f' => {
|
||||
*size *= radix;
|
||||
*size += u64::from(b + 10 - b'a');
|
||||
}
|
||||
b @ b'A'...b'F' => {
|
||||
*size *= radix;
|
||||
*size += u64::from(b + 10 - b'A');
|
||||
}
|
||||
b'\t' | b' ' => return Ok(Async::Ready(ChunkedState::SizeLws)),
|
||||
b';' => return Ok(Async::Ready(ChunkedState::Extension)),
|
||||
b'\r' => return Ok(Async::Ready(ChunkedState::SizeLf)),
|
||||
|
||||
let rem = match byte!(rdr) {
|
||||
b @ b'0'..=b'9' => b - b'0',
|
||||
b @ b'a'..=b'f' => b + 10 - b'a',
|
||||
b @ b'A'..=b'F' => b + 10 - b'A',
|
||||
b'\t' | b' ' => return Poll::Ready(Ok(ChunkedState::SizeLws)),
|
||||
b';' => return Poll::Ready(Ok(ChunkedState::Extension)),
|
||||
b'\r' => return Poll::Ready(Ok(ChunkedState::SizeLf)),
|
||||
_ => {
|
||||
return Err(io::Error::new(
|
||||
return Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Invalid chunk size line: Invalid Size",
|
||||
));
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
||||
match size.checked_mul(radix) {
|
||||
Some(n) => {
|
||||
*size = n as u64;
|
||||
*size += rem as u64;
|
||||
|
||||
Poll::Ready(Ok(ChunkedState::Size))
|
||||
}
|
||||
None => {
|
||||
debug!("chunk size would overflow");
|
||||
Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Invalid chunk size line: Invalid Size",
|
||||
)))
|
||||
}
|
||||
}
|
||||
Ok(Async::Ready(ChunkedState::Size))
|
||||
}
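The rewritten `read_size` accumulates the hexadecimal chunk size one digit at a time and now goes through `checked_mul`, so an attacker-supplied size line can no longer silently wrap a `u64`. A stripped-down, hedged sketch of that accumulation (the helper operates on a complete digit slice rather than the byte-at-a-time state machine above):

```rust
// Illustrative only: fold hex digits into a u64, failing on overflow
// instead of wrapping, mirroring the checked_mul change in the diff.
fn parse_chunk_size(hex: &[u8]) -> Result<u64, &'static str> {
    let mut size: u64 = 0;
    for &b in hex {
        let digit = match b {
            b'0'..=b'9' => b - b'0',
            b'a'..=b'f' => b + 10 - b'a',
            b'A'..=b'F' => b + 10 - b'A',
            _ => return Err("invalid chunk size digit"),
        };
        size = size
            .checked_mul(16)
            .and_then(|n| n.checked_add(u64::from(digit)))
            .ok_or("chunk size overflows u64")?;
    }
    Ok(size)
}

fn main() {
    assert_eq!(parse_chunk_size(b"ff"), Ok(255));
    assert!(parse_chunk_size(b"fffffffffffffffff").is_err()); // 17 hex digits: overflow
}
```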
|
||||
fn read_size_lws(rdr: &mut BytesMut) -> Poll<ChunkedState, io::Error> {
|
||||
|
||||
fn read_size_lws(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
trace!("read_size_lws");
|
||||
match byte!(rdr) {
|
||||
// LWS can follow the chunk size, but no more digits can come
|
||||
b'\t' | b' ' => Ok(Async::Ready(ChunkedState::SizeLws)),
|
||||
b';' => Ok(Async::Ready(ChunkedState::Extension)),
|
||||
b'\r' => Ok(Async::Ready(ChunkedState::SizeLf)),
|
||||
_ => Err(io::Error::new(
|
||||
b'\t' | b' ' => Poll::Ready(Ok(ChunkedState::SizeLws)),
|
||||
b';' => Poll::Ready(Ok(ChunkedState::Extension)),
|
||||
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
|
||||
_ => Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Invalid chunk size linear white space",
|
||||
)),
|
||||
))),
|
||||
}
|
||||
}
|
||||
fn read_extension(rdr: &mut BytesMut) -> Poll<ChunkedState, io::Error> {
|
||||
fn read_extension(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
match byte!(rdr) {
|
||||
b'\r' => Ok(Async::Ready(ChunkedState::SizeLf)),
|
||||
_ => Ok(Async::Ready(ChunkedState::Extension)), // no supported extensions
|
||||
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
|
||||
// strictly 0x20 (space) should be disallowed but we don't parse quoted strings here
|
||||
0x00..=0x08 | 0x0a..=0x1f | 0x7f => Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Invalid character in chunk extension",
|
||||
))),
|
||||
_ => Poll::Ready(Ok(ChunkedState::Extension)), // no supported extensions
|
||||
}
|
||||
}
|
||||
fn read_size_lf(
|
||||
rdr: &mut BytesMut,
|
||||
size: &mut u64,
|
||||
) -> Poll<ChunkedState, io::Error> {
|
||||
) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
match byte!(rdr) {
|
||||
b'\n' if *size > 0 => Ok(Async::Ready(ChunkedState::Body)),
|
||||
b'\n' if *size == 0 => Ok(Async::Ready(ChunkedState::EndCr)),
|
||||
_ => Err(io::Error::new(
|
||||
b'\n' if *size > 0 => Poll::Ready(Ok(ChunkedState::Body)),
|
||||
b'\n' if *size == 0 => Poll::Ready(Ok(ChunkedState::EndCr)),
|
||||
_ => Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Invalid chunk size LF",
|
||||
)),
|
||||
))),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -557,16 +611,16 @@ impl ChunkedState {
|
||||
rdr: &mut BytesMut,
|
||||
rem: &mut u64,
|
||||
buf: &mut Option<Bytes>,
|
||||
) -> Poll<ChunkedState, io::Error> {
|
||||
) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
trace!("Chunked read, remaining={:?}", rem);
|
||||
|
||||
let len = rdr.len() as u64;
|
||||
if len == 0 {
|
||||
Ok(Async::Ready(ChunkedState::Body))
|
||||
Poll::Ready(Ok(ChunkedState::Body))
|
||||
} else {
|
||||
let slice;
|
||||
if *rem > len {
|
||||
slice = rdr.take().freeze();
|
||||
slice = rdr.split().freeze();
|
||||
*rem -= len;
|
||||
} else {
|
||||
slice = rdr.split_to(*rem as usize).freeze();
|
||||
@@ -574,47 +628,47 @@ impl ChunkedState {
|
||||
}
|
||||
*buf = Some(slice);
|
||||
if *rem > 0 {
|
||||
Ok(Async::Ready(ChunkedState::Body))
|
||||
Poll::Ready(Ok(ChunkedState::Body))
|
||||
} else {
|
||||
Ok(Async::Ready(ChunkedState::BodyCr))
|
||||
Poll::Ready(Ok(ChunkedState::BodyCr))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn read_body_cr(rdr: &mut BytesMut) -> Poll<ChunkedState, io::Error> {
|
||||
fn read_body_cr(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
match byte!(rdr) {
|
||||
b'\r' => Ok(Async::Ready(ChunkedState::BodyLf)),
|
||||
_ => Err(io::Error::new(
|
||||
b'\r' => Poll::Ready(Ok(ChunkedState::BodyLf)),
|
||||
_ => Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Invalid chunk body CR",
|
||||
)),
|
||||
))),
|
||||
}
|
||||
}
|
||||
fn read_body_lf(rdr: &mut BytesMut) -> Poll<ChunkedState, io::Error> {
|
||||
fn read_body_lf(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
match byte!(rdr) {
|
||||
b'\n' => Ok(Async::Ready(ChunkedState::Size)),
|
||||
_ => Err(io::Error::new(
|
||||
b'\n' => Poll::Ready(Ok(ChunkedState::Size)),
|
||||
_ => Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Invalid chunk body LF",
|
||||
)),
|
||||
))),
|
||||
}
|
||||
}
|
||||
fn read_end_cr(rdr: &mut BytesMut) -> Poll<ChunkedState, io::Error> {
|
||||
fn read_end_cr(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
match byte!(rdr) {
|
||||
b'\r' => Ok(Async::Ready(ChunkedState::EndLf)),
|
||||
_ => Err(io::Error::new(
|
||||
b'\r' => Poll::Ready(Ok(ChunkedState::EndLf)),
|
||||
_ => Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Invalid chunk end CR",
|
||||
)),
|
||||
))),
|
||||
}
|
||||
}
|
||||
fn read_end_lf(rdr: &mut BytesMut) -> Poll<ChunkedState, io::Error> {
|
||||
fn read_end_lf(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
match byte!(rdr) {
|
||||
b'\n' => Ok(Async::Ready(ChunkedState::End)),
|
||||
_ => Err(io::Error::new(
|
||||
b'\n' => Poll::Ready(Ok(ChunkedState::End)),
|
||||
_ => Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Invalid chunk end LF",
|
||||
)),
|
||||
))),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -638,10 +692,7 @@ mod tests {
|
||||
}
|
||||
|
||||
fn is_unhandled(&self) -> bool {
|
||||
match self {
|
||||
PayloadType::Stream(_) => true,
|
||||
_ => false,
|
||||
}
|
||||
matches!(self, PayloadType::Stream(_))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -653,10 +704,7 @@ mod tests {
|
||||
}
|
||||
}
|
||||
fn eof(&self) -> bool {
|
||||
match *self {
|
||||
PayloadItem::Eof => true,
|
||||
_ => false,
|
||||
}
|
||||
matches!(*self, PayloadItem::Eof)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -962,18 +1010,12 @@ mod tests {
|
||||
unreachable!("Error");
|
||||
}
|
||||
|
||||
// type in chunked
|
||||
// intentional typo in "chunked"
|
||||
let mut buf = BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
transfer-encoding: chnked\r\n\r\n",
|
||||
);
|
||||
let req = parse_ready!(&mut buf);
|
||||
|
||||
if let Ok(val) = req.chunked() {
|
||||
assert!(!val);
|
||||
} else {
|
||||
unreachable!("Error");
|
||||
}
|
||||
expect_parse_err!(&mut buf);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -1023,7 +1065,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_http_request_upgrade() {
|
||||
fn test_http_request_upgrade_websocket() {
|
||||
let mut buf = BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
connection: upgrade\r\n\
|
||||
@@ -1037,6 +1079,26 @@ mod tests {
|
||||
assert!(pl.is_unhandled());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_http_request_upgrade_h2c() {
|
||||
let mut buf = BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
connection: upgrade, http2-settings\r\n\
|
||||
upgrade: h2c\r\n\
|
||||
http2-settings: dummy\r\n\r\n",
|
||||
);
|
||||
let mut reader = MessageDecoder::<Request>::default();
|
||||
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
|
||||
// `connection: upgrade, http2-settings` doesn't work properly..
|
||||
// see MessageType::set_headers().
|
||||
//
|
||||
// The line below should be:
|
||||
// assert_eq!(req.head().connection_type(), ConnectionType::Upgrade);
|
||||
assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive);
|
||||
assert!(req.upgrade());
|
||||
assert!(!pl.is_unhandled());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_http_request_parser_utf8() {
|
||||
let mut buf = BytesMut::from(
|
||||
@@ -1191,4 +1253,16 @@ mod tests {
|
||||
let msg = pl.decode(&mut buf).unwrap().unwrap();
|
||||
assert!(msg.eof());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_response_http10_read_until_eof() {
|
||||
let mut buf = BytesMut::from(&"HTTP/1.0 200 Ok\r\n\r\ntest data"[..]);
|
||||
|
||||
let mut reader = MessageDecoder::<ResponseHead>::default();
|
||||
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
|
||||
let mut pl = pl.unwrap();
|
||||
|
||||
let chunk = pl.decode(&mut buf).unwrap().unwrap();
|
||||
assert_eq!(chunk, PayloadItem::Chunk(Bytes::from_static(b"test data")));
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,22 +1,18 @@
|
||||
#![allow(unused_imports, unused_variables, dead_code)]
|
||||
use std::fmt::Write as FmtWrite;
|
||||
use std::io::Write;
|
||||
use std::marker::PhantomData;
|
||||
use std::str::FromStr;
|
||||
use std::{cmp, fmt, io, mem};
|
||||
use std::ptr::copy_nonoverlapping;
|
||||
use std::slice::from_raw_parts_mut;
|
||||
use std::{cmp, io};
|
||||
|
||||
use bytes::{BufMut, Bytes, BytesMut};
|
||||
use bytes::{BufMut, BytesMut};
|
||||
|
||||
use crate::body::BodySize;
|
||||
use crate::config::ServiceConfig;
|
||||
use crate::header::{map, ContentEncoding};
|
||||
use crate::header::map;
|
||||
use crate::helpers;
|
||||
use crate::http::header::{
|
||||
HeaderValue, ACCEPT_ENCODING, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING,
|
||||
};
|
||||
use crate::http::{HeaderMap, Method, StatusCode, Version};
|
||||
use crate::message::{ConnectionType, Head, RequestHead, ResponseHead};
|
||||
use crate::request::Request;
|
||||
use crate::http::header::{CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING};
|
||||
use crate::http::{HeaderMap, StatusCode, Version};
|
||||
use crate::message::{ConnectionType, RequestHeadType};
|
||||
use crate::response::Response;
|
||||
|
||||
const AVERAGE_HEADER_SIZE: usize = 30;
|
||||
@@ -43,6 +39,12 @@ pub(crate) trait MessageType: Sized {
|
||||
|
||||
fn headers(&self) -> &HeaderMap;
|
||||
|
||||
fn extra_headers(&self) -> Option<&HeaderMap>;
|
||||
|
||||
fn camel_case(&self) -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
fn chunked(&self) -> bool;
|
||||
|
||||
fn encode_status(&mut self, dst: &mut BytesMut) -> io::Result<()>;
|
||||
@@ -57,37 +59,46 @@ pub(crate) trait MessageType: Sized {
|
||||
) -> io::Result<()> {
|
||||
let chunked = self.chunked();
|
||||
let mut skip_len = length != BodySize::Stream;
|
||||
let camel_case = self.camel_case();
|
||||
|
||||
// Content length
|
||||
if let Some(status) = self.status() {
|
||||
match status {
|
||||
StatusCode::NO_CONTENT
|
||||
| StatusCode::CONTINUE
|
||||
| StatusCode::PROCESSING => length = BodySize::None,
|
||||
StatusCode::SWITCHING_PROTOCOLS => {
|
||||
StatusCode::CONTINUE
|
||||
| StatusCode::SWITCHING_PROTOCOLS
|
||||
| StatusCode::PROCESSING
|
||||
| StatusCode::NO_CONTENT => {
|
||||
// skip content-length and transfer-encoding headers
|
||||
// See https://tools.ietf.org/html/rfc7230#section-3.3.1
|
||||
// and https://tools.ietf.org/html/rfc7230#section-3.3.2
|
||||
skip_len = true;
|
||||
length = BodySize::Stream;
|
||||
length = BodySize::None
|
||||
}
|
||||
_ => (),
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
match length {
|
||||
BodySize::Stream => {
|
||||
if chunked {
|
||||
dst.put_slice(b"\r\ntransfer-encoding: chunked\r\n")
|
||||
skip_len = true;
|
||||
if camel_case {
|
||||
dst.put_slice(b"\r\nTransfer-Encoding: chunked\r\n")
|
||||
} else {
|
||||
dst.put_slice(b"\r\ntransfer-encoding: chunked\r\n")
|
||||
}
|
||||
} else {
|
||||
skip_len = false;
|
||||
dst.put_slice(b"\r\n");
|
||||
}
|
||||
}
|
||||
BodySize::Empty => {
|
||||
dst.put_slice(b"\r\ncontent-length: 0\r\n");
|
||||
if camel_case {
|
||||
dst.put_slice(b"\r\nContent-Length: 0\r\n");
|
||||
} else {
|
||||
dst.put_slice(b"\r\ncontent-length: 0\r\n");
|
||||
}
|
||||
}
|
||||
BodySize::Sized(len) => helpers::write_content_length(len, dst),
|
||||
BodySize::Sized64(len) => {
|
||||
dst.put_slice(b"\r\ncontent-length: ");
|
||||
write!(dst.writer(), "{}\r\n", len)?;
|
||||
}
|
||||
BodySize::None => dst.put_slice(b"\r\n"),
|
||||
}
|
||||
|
||||
@@ -95,82 +106,160 @@ pub(crate) trait MessageType: Sized {
|
||||
match ctype {
|
||||
ConnectionType::Upgrade => dst.put_slice(b"connection: upgrade\r\n"),
|
||||
ConnectionType::KeepAlive if version < Version::HTTP_11 => {
|
||||
dst.put_slice(b"connection: keep-alive\r\n")
|
||||
if camel_case {
|
||||
dst.put_slice(b"Connection: keep-alive\r\n")
|
||||
} else {
|
||||
dst.put_slice(b"connection: keep-alive\r\n")
|
||||
}
|
||||
}
|
||||
ConnectionType::Close if version >= Version::HTTP_11 => {
|
||||
dst.put_slice(b"connection: close\r\n")
|
||||
if camel_case {
|
||||
dst.put_slice(b"Connection: close\r\n")
|
||||
} else {
|
||||
dst.put_slice(b"connection: close\r\n")
|
||||
}
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
|
||||
// merging headers from head and extra headers. HeaderMap::new() does not allocate.
|
||||
let empty_headers = HeaderMap::new();
|
||||
let extra_headers = self.extra_headers().unwrap_or(&empty_headers);
|
||||
let headers = self
|
||||
.headers()
|
||||
.inner
|
||||
.iter()
|
||||
.filter(|(name, _)| !extra_headers.contains_key(*name))
|
||||
.chain(extra_headers.inner.iter());
|
||||
|
||||
// write headers
|
||||
let mut pos = 0;
|
||||
|
||||
let mut has_date = false;
|
||||
let mut remaining = dst.remaining_mut();
|
||||
let mut buf = unsafe { &mut *(dst.bytes_mut() as *mut [u8]) };
|
||||
for (key, value) in self.headers().inner.iter() {
|
||||
|
||||
let mut buf = dst.bytes_mut().as_mut_ptr() as *mut u8;
|
||||
let mut remaining = dst.capacity() - dst.len();
|
||||
|
||||
// tracks bytes written since last buffer resize
|
||||
// since buf is a raw pointer to a bytes container storage but is written to without the
|
||||
// container's knowledge, this is used to sync the containers cursor after data is written
|
||||
let mut pos = 0;
|
||||
|
||||
for (key, value) in headers {
|
||||
match *key {
|
||||
CONNECTION => continue,
|
||||
TRANSFER_ENCODING | CONTENT_LENGTH if skip_len => continue,
|
||||
DATE => {
|
||||
has_date = true;
|
||||
}
|
||||
DATE => has_date = true,
|
||||
_ => (),
|
||||
}
|
||||
|
||||
let k = key.as_str().as_bytes();
|
||||
let k_len = k.len();
|
||||
|
||||
match value {
|
||||
map::Value::One(ref val) => {
|
||||
let v = val.as_ref();
|
||||
let len = k.len() + v.len() + 4;
|
||||
let v_len = v.len();
|
||||
|
||||
// key length + value length + colon + space + \r\n
|
||||
let len = k_len + v_len + 4;
|
||||
|
||||
if len > remaining {
|
||||
// not enough room in buffer for this header; reserve more space
|
||||
|
||||
// SAFETY: all the bytes written up to position "pos" are initialized
|
||||
// the written byte count and pointer advancement are kept in sync
|
||||
unsafe {
|
||||
dst.advance_mut(pos);
|
||||
}
|
||||
|
||||
pos = 0;
|
||||
dst.reserve(len * 2);
|
||||
remaining = dst.remaining_mut();
|
||||
unsafe {
|
||||
buf = &mut *(dst.bytes_mut() as *mut _);
|
||||
}
|
||||
remaining = dst.capacity() - dst.len();
|
||||
|
||||
// re-assign buf raw pointer since it's possible that the buffer was
|
||||
// reallocated and/or resized
|
||||
buf = dst.bytes_mut().as_mut_ptr() as *mut u8;
|
||||
}
|
||||
buf[pos..pos + k.len()].copy_from_slice(k);
|
||||
pos += k.len();
|
||||
buf[pos..pos + 2].copy_from_slice(b": ");
|
||||
pos += 2;
|
||||
buf[pos..pos + v.len()].copy_from_slice(v);
|
||||
pos += v.len();
|
||||
buf[pos..pos + 2].copy_from_slice(b"\r\n");
|
||||
pos += 2;
|
||||
|
||||
// SAFETY: on each write, it is enough to ensure that the advancement of the
|
||||
// cursor matches the number of bytes written
|
||||
unsafe {
|
||||
// use upper Camel-Case
|
||||
if camel_case {
|
||||
write_camel_case(k, from_raw_parts_mut(buf, k_len))
|
||||
} else {
|
||||
write_data(k, buf, k_len)
|
||||
}
|
||||
|
||||
buf = buf.add(k_len);
|
||||
|
||||
write_data(b": ", buf, 2);
|
||||
buf = buf.add(2);
|
||||
|
||||
write_data(v, buf, v_len);
|
||||
buf = buf.add(v_len);
|
||||
|
||||
write_data(b"\r\n", buf, 2);
|
||||
buf = buf.add(2);
|
||||
}
|
||||
|
||||
pos += len;
|
||||
remaining -= len;
|
||||
}
|
||||
|
||||
map::Value::Multi(ref vec) => {
|
||||
for val in vec {
|
||||
let v = val.as_ref();
|
||||
let len = k.len() + v.len() + 4;
|
||||
let v_len = v.len();
|
||||
let len = k_len + v_len + 4;
|
||||
|
||||
if len > remaining {
|
||||
// SAFETY: all the bytes written up to position "pos" are initialized
|
||||
// the written byte count and pointer advancement are kept in sync
|
||||
unsafe {
|
||||
dst.advance_mut(pos);
|
||||
}
|
||||
pos = 0;
|
||||
dst.reserve(len * 2);
|
||||
remaining = dst.remaining_mut();
|
||||
unsafe {
|
||||
buf = &mut *(dst.bytes_mut() as *mut _);
|
||||
}
|
||||
remaining = dst.capacity() - dst.len();
|
||||
|
||||
// re-assign buf raw pointer since it's possible that the buffer was
|
||||
// reallocated and/or resized
|
||||
buf = dst.bytes_mut().as_mut_ptr() as *mut u8;
|
||||
}
|
||||
buf[pos..pos + k.len()].copy_from_slice(k);
|
||||
pos += k.len();
|
||||
buf[pos..pos + 2].copy_from_slice(b": ");
|
||||
pos += 2;
|
||||
buf[pos..pos + v.len()].copy_from_slice(v);
|
||||
pos += v.len();
|
||||
buf[pos..pos + 2].copy_from_slice(b"\r\n");
|
||||
pos += 2;
|
||||
|
||||
// SAFETY: on each write, it is enough to ensure that the advancement of
|
||||
// the cursor matches the number of bytes written
|
||||
unsafe {
|
||||
if camel_case {
|
||||
write_camel_case(k, from_raw_parts_mut(buf, k_len));
|
||||
} else {
|
||||
write_data(k, buf, k_len);
|
||||
}
|
||||
|
||||
buf = buf.add(k_len);
|
||||
|
||||
write_data(b": ", buf, 2);
|
||||
buf = buf.add(2);
|
||||
|
||||
write_data(v, buf, v_len);
|
||||
buf = buf.add(v_len);
|
||||
|
||||
write_data(b"\r\n", buf, 2);
|
||||
buf = buf.add(2);
|
||||
};
|
||||
|
||||
pos += len;
|
||||
remaining -= len;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// final cursor synchronization with the bytes container
|
||||
//
|
||||
// SAFETY: all the bytes written up to position "pos" are initialized
|
||||
// the written byte count and pointer advancement are kept in sync
|
||||
unsafe {
|
||||
dst.advance_mut(pos);
|
||||
}
|
||||
@@ -200,6 +289,10 @@ impl MessageType for Response<()> {
|
||||
&self.head().headers
|
||||
}
|
||||
|
||||
fn extra_headers(&self) -> Option<&HeaderMap> {
|
||||
None
|
||||
}
|
||||
|
||||
fn encode_status(&mut self, dst: &mut BytesMut) -> io::Result<()> {
|
||||
let head = self.head();
|
||||
let reason = head.reason().as_bytes();
|
||||
@@ -212,31 +305,46 @@ impl MessageType for Response<()> {
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageType for RequestHead {
|
||||
impl MessageType for RequestHeadType {
|
||||
fn status(&self) -> Option<StatusCode> {
|
||||
None
|
||||
}
|
||||
|
||||
fn chunked(&self) -> bool {
|
||||
self.chunked()
|
||||
self.as_ref().chunked()
|
||||
}
|
||||
|
||||
fn camel_case(&self) -> bool {
|
||||
self.as_ref().camel_case_headers()
|
||||
}
|
||||
|
||||
fn headers(&self) -> &HeaderMap {
|
||||
&self.headers
|
||||
self.as_ref().headers()
|
||||
}
|
||||
|
||||
fn extra_headers(&self) -> Option<&HeaderMap> {
|
||||
self.extra_headers()
|
||||
}
|
||||
|
||||
fn encode_status(&mut self, dst: &mut BytesMut) -> io::Result<()> {
|
||||
dst.reserve(256 + self.headers.len() * AVERAGE_HEADER_SIZE);
|
||||
let head = self.as_ref();
|
||||
dst.reserve(256 + head.headers.len() * AVERAGE_HEADER_SIZE);
|
||||
write!(
|
||||
Writer(dst),
|
||||
"{} {} {}",
|
||||
self.method,
|
||||
self.uri.path_and_query().map(|u| u.as_str()).unwrap_or("/"),
|
||||
match self.version {
|
||||
head.method,
|
||||
head.uri.path_and_query().map(|u| u.as_str()).unwrap_or("/"),
|
||||
match head.version {
|
||||
Version::HTTP_09 => "HTTP/0.9",
|
||||
Version::HTTP_10 => "HTTP/1.0",
|
||||
Version::HTTP_11 => "HTTP/1.1",
|
||||
Version::HTTP_2 => "HTTP/2.0",
|
||||
Version::HTTP_3 => "HTTP/3.0",
|
||||
_ =>
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
"unsupported version"
|
||||
)),
|
||||
}
|
||||
)
|
||||
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
|
||||
@@ -269,8 +377,7 @@ impl<T: MessageType> MessageEncoder<T> {
|
||||
if !head {
|
||||
self.te = match length {
|
||||
BodySize::Empty => TransferEncoding::empty(),
|
||||
BodySize::Sized(len) => TransferEncoding::length(len as u64),
|
||||
BodySize::Sized64(len) => TransferEncoding::length(len),
|
||||
BodySize::Sized(len) => TransferEncoding::length(len),
|
||||
BodySize::Stream => {
|
||||
if message.chunked() && !stream {
|
||||
TransferEncoding::chunked()
|
||||
@@ -418,10 +525,51 @@ impl<'a> io::Write for Writer<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
/// Callers must ensure that the given length matches given value length.
|
||||
unsafe fn write_data(value: &[u8], buf: *mut u8, len: usize) {
|
||||
debug_assert_eq!(value.len(), len);
|
||||
copy_nonoverlapping(value.as_ptr(), buf, len);
|
||||
}
|
||||
|
||||
fn write_camel_case(value: &[u8], buffer: &mut [u8]) {
|
||||
let mut index = 0;
|
||||
let key = value;
|
||||
let mut key_iter = key.iter();
|
||||
|
||||
if let Some(c) = key_iter.next() {
|
||||
if *c >= b'a' && *c <= b'z' {
|
||||
buffer[index] = *c ^ b' ';
|
||||
index += 1;
|
||||
}
|
||||
} else {
|
||||
return;
|
||||
}
|
||||
|
||||
while let Some(c) = key_iter.next() {
|
||||
buffer[index] = *c;
|
||||
index += 1;
|
||||
if *c == b'-' {
|
||||
if let Some(c) = key_iter.next() {
|
||||
if *c >= b'a' && *c <= b'z' {
|
||||
buffer[index] = *c ^ b' ';
|
||||
index += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
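`write_camel_case` upper-cases the first letter and each letter that follows a '-' by XOR-ing with `b' '` (0x20), since ASCII upper- and lower-case letters differ only in that bit. A hedged, allocation-based sketch of the same transformation, shown only for illustration:

```rust
// Illustrative only: "content-type" -> "Content-Type".
fn camel_case(name: &str) -> String {
    let mut out = String::with_capacity(name.len());
    let mut upper_next = true;
    for c in name.chars() {
        if upper_next && c.is_ascii_lowercase() {
            // flipping bit 0x20 turns an ASCII lowercase letter into uppercase
            out.push((c as u8 ^ b' ') as char);
        } else {
            out.push(c);
        }
        upper_next = c == '-';
    }
    out
}

fn main() {
    assert_eq!(camel_case("content-type"), "Content-Type");
    assert_eq!(camel_case("x-request-id"), "X-Request-Id");
}
```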
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::rc::Rc;
|
||||
|
||||
use bytes::Bytes;
|
||||
use http::header::AUTHORIZATION;
|
||||
|
||||
use super::*;
|
||||
use crate::http::header::{HeaderValue, CONTENT_TYPE};
|
||||
use crate::RequestHead;
|
||||
|
||||
#[test]
|
||||
fn test_chunked_te() {
|
||||
@@ -432,8 +580,128 @@ mod tests {
|
||||
assert!(enc.encode(b"", &mut bytes).ok().unwrap());
|
||||
}
|
||||
assert_eq!(
|
||||
bytes.take().freeze(),
|
||||
bytes.split().freeze(),
|
||||
Bytes::from_static(b"4\r\ntest\r\n0\r\n\r\n")
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_camel_case() {
|
||||
let mut bytes = BytesMut::with_capacity(2048);
|
||||
let mut head = RequestHead::default();
|
||||
head.set_camel_case_headers(true);
|
||||
head.headers.insert(DATE, HeaderValue::from_static("date"));
|
||||
head.headers
|
||||
.insert(CONTENT_TYPE, HeaderValue::from_static("plain/text"));
|
||||
|
||||
let mut head = RequestHeadType::Owned(head);
|
||||
|
||||
let _ = head.encode_headers(
|
||||
&mut bytes,
|
||||
Version::HTTP_11,
|
||||
BodySize::Empty,
|
||||
ConnectionType::Close,
|
||||
&ServiceConfig::default(),
|
||||
);
|
||||
let data =
|
||||
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
|
||||
assert!(data.contains("Content-Length: 0\r\n"));
|
||||
assert!(data.contains("Connection: close\r\n"));
|
||||
assert!(data.contains("Content-Type: plain/text\r\n"));
|
||||
assert!(data.contains("Date: date\r\n"));
|
||||
|
||||
let _ = head.encode_headers(
|
||||
&mut bytes,
|
||||
Version::HTTP_11,
|
||||
BodySize::Stream,
|
||||
ConnectionType::KeepAlive,
|
||||
&ServiceConfig::default(),
|
||||
);
|
||||
let data =
|
||||
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
|
||||
assert!(data.contains("Transfer-Encoding: chunked\r\n"));
|
||||
assert!(data.contains("Content-Type: plain/text\r\n"));
|
||||
assert!(data.contains("Date: date\r\n"));
|
||||
|
||||
let mut head = RequestHead::default();
|
||||
head.set_camel_case_headers(false);
|
||||
head.headers.insert(DATE, HeaderValue::from_static("date"));
|
||||
head.headers
|
||||
.insert(CONTENT_TYPE, HeaderValue::from_static("plain/text"));
|
||||
head.headers
|
||||
.append(CONTENT_TYPE, HeaderValue::from_static("xml"));
|
||||
|
||||
let mut head = RequestHeadType::Owned(head);
|
||||
let _ = head.encode_headers(
|
||||
&mut bytes,
|
||||
Version::HTTP_11,
|
||||
BodySize::Stream,
|
||||
ConnectionType::KeepAlive,
|
||||
&ServiceConfig::default(),
|
||||
);
|
||||
let data =
|
||||
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
|
||||
assert!(data.contains("transfer-encoding: chunked\r\n"));
|
||||
assert!(data.contains("content-type: xml\r\n"));
|
||||
assert!(data.contains("content-type: plain/text\r\n"));
|
||||
assert!(data.contains("date: date\r\n"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extra_headers() {
|
||||
let mut bytes = BytesMut::with_capacity(2048);
|
||||
|
||||
let mut head = RequestHead::default();
|
||||
head.headers.insert(
|
||||
AUTHORIZATION,
|
||||
HeaderValue::from_static("some authorization"),
|
||||
);
|
||||
|
||||
let mut extra_headers = HeaderMap::new();
|
||||
extra_headers.insert(
|
||||
AUTHORIZATION,
|
||||
HeaderValue::from_static("another authorization"),
|
||||
);
|
||||
extra_headers.insert(DATE, HeaderValue::from_static("date"));
|
||||
|
||||
let mut head = RequestHeadType::Rc(Rc::new(head), Some(extra_headers));
|
||||
|
||||
let _ = head.encode_headers(
|
||||
&mut bytes,
|
||||
Version::HTTP_11,
|
||||
BodySize::Empty,
|
||||
ConnectionType::Close,
|
||||
&ServiceConfig::default(),
|
||||
);
|
||||
let data =
|
||||
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
|
||||
assert!(data.contains("content-length: 0\r\n"));
|
||||
assert!(data.contains("connection: close\r\n"));
|
||||
assert!(data.contains("authorization: another authorization\r\n"));
|
||||
assert!(data.contains("date: date\r\n"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_no_content_length() {
|
||||
let mut bytes = BytesMut::with_capacity(2048);
|
||||
|
||||
let mut res: Response<()> =
|
||||
Response::new(StatusCode::SWITCHING_PROTOCOLS).into_body::<()>();
|
||||
res.headers_mut()
|
||||
.insert(DATE, HeaderValue::from_static(&""));
|
||||
res.headers_mut()
|
||||
.insert(CONTENT_LENGTH, HeaderValue::from_static(&"0"));
|
||||
|
||||
let _ = res.encode_headers(
|
||||
&mut bytes,
|
||||
Version::HTTP_11,
|
||||
BodySize::Stream,
|
||||
ConnectionType::Upgrade,
|
||||
&ServiceConfig::default(),
|
||||
);
|
||||
let data =
|
||||
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
|
||||
assert!(!data.contains("content-length: 0\r\n"));
|
||||
assert!(!data.contains("transfer-encoding: chunked\r\n"));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,21 +1,23 @@
|
||||
use actix_service::{NewService, Service};
|
||||
use futures::future::{ok, FutureResult};
|
||||
use futures::{Async, Poll};
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use actix_service::{Service, ServiceFactory};
|
||||
use futures_util::future::{ok, Ready};
|
||||
|
||||
use crate::error::Error;
|
||||
use crate::request::Request;
|
||||
|
||||
pub struct ExpectHandler;
|
||||
|
||||
impl NewService for ExpectHandler {
|
||||
impl ServiceFactory for ExpectHandler {
|
||||
type Config = ();
|
||||
type Request = Request;
|
||||
type Response = Request;
|
||||
type Error = Error;
|
||||
type Service = ExpectHandler;
|
||||
type InitError = Error;
|
||||
type Future = FutureResult<Self::Service, Self::InitError>;
|
||||
type Future = Ready<Result<Self::Service, Self::InitError>>;
|
||||
|
||||
fn new_service(&self, _: &()) -> Self::Future {
|
||||
fn new_service(&self, _: ()) -> Self::Future {
|
||||
ok(ExpectHandler)
|
||||
}
|
||||
}
|
||||
@@ -24,10 +26,10 @@ impl Service for ExpectHandler {
|
||||
type Request = Request;
|
||||
type Response = Request;
|
||||
type Error = Error;
|
||||
type Future = FutureResult<Self::Response, Self::Error>;
|
||||
type Future = Ready<Result<Self::Response, Self::Error>>;
|
||||
|
||||
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
|
||||
Ok(Async::Ready(()))
|
||||
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, req: Request) -> Self::Future {
|
||||
|
||||
@@ -9,6 +9,8 @@ mod encoder;
|
||||
mod expect;
|
||||
mod payload;
|
||||
mod service;
|
||||
mod upgrade;
|
||||
mod utils;
|
||||
|
||||
pub use self::client::{ClientCodec, ClientPayloadCodec};
|
||||
pub use self::codec::Codec;
|
||||
@@ -16,6 +18,8 @@ pub use self::dispatcher::Dispatcher;
|
||||
pub use self::expect::ExpectHandler;
|
||||
pub use self::payload::Payload;
|
||||
pub use self::service::{H1Service, H1ServiceHandler, OneRequest};
|
||||
pub use self::upgrade::UpgradeHandler;
|
||||
pub use self::utils::SendResponse;
|
||||
|
||||
#[derive(Debug)]
|
||||
/// Codec message
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
//! Payload stream
|
||||
use std::cell::RefCell;
|
||||
use std::collections::VecDeque;
|
||||
use std::pin::Pin;
|
||||
use std::rc::{Rc, Weak};
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use actix_utils::task::LocalWaker;
|
||||
use bytes::Bytes;
|
||||
use futures::task::current as current_task;
|
||||
use futures::task::Task;
|
||||
use futures::{Async, Poll, Stream};
|
||||
use futures_core::Stream;
|
||||
|
||||
use crate::error::PayloadError;
|
||||
|
||||
@@ -77,15 +78,24 @@ impl Payload {
|
||||
pub fn unread_data(&mut self, data: Bytes) {
|
||||
self.inner.borrow_mut().unread_data(data);
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn readany(
|
||||
&mut self,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, PayloadError>>> {
|
||||
self.inner.borrow_mut().readany(cx)
|
||||
}
|
||||
}
|
||||
|
||||
impl Stream for Payload {
|
||||
type Item = Bytes;
|
||||
type Error = PayloadError;
|
||||
type Item = Result<Bytes, PayloadError>;
|
||||
|
||||
#[inline]
|
||||
fn poll(&mut self) -> Poll<Option<Bytes>, PayloadError> {
|
||||
self.inner.borrow_mut().readany()
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, PayloadError>>> {
|
||||
self.inner.borrow_mut().readany(cx)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -117,19 +127,14 @@ impl PayloadSender {
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn need_read(&self) -> PayloadStatus {
|
||||
pub fn need_read(&self, cx: &mut Context<'_>) -> PayloadStatus {
|
||||
// we check need_read only if Payload (other side) is alive,
|
||||
// otherwise always return true (consume payload)
|
||||
if let Some(shared) = self.inner.upgrade() {
|
||||
if shared.borrow().need_read {
|
||||
PayloadStatus::Read
|
||||
} else {
|
||||
#[cfg(not(test))]
|
||||
{
|
||||
if shared.borrow_mut().io_task.is_none() {
|
||||
shared.borrow_mut().io_task = Some(current_task());
|
||||
}
|
||||
}
|
||||
shared.borrow_mut().io_task.register(cx.waker());
|
||||
PayloadStatus::Pause
|
||||
}
|
||||
} else {
|
||||
@@ -145,8 +150,8 @@ struct Inner {
|
||||
err: Option<PayloadError>,
|
||||
need_read: bool,
|
||||
items: VecDeque<Bytes>,
|
||||
task: Option<Task>,
|
||||
io_task: Option<Task>,
|
||||
task: LocalWaker,
|
||||
io_task: LocalWaker,
|
||||
}
|
||||
|
||||
impl Inner {
|
||||
@@ -157,8 +162,8 @@ impl Inner {
|
||||
err: None,
|
||||
items: VecDeque::new(),
|
||||
need_read: true,
|
||||
task: None,
|
||||
io_task: None,
|
||||
task: LocalWaker::new(),
|
||||
io_task: LocalWaker::new(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -178,7 +183,7 @@ impl Inner {
|
||||
self.items.push_back(data);
|
||||
self.need_read = self.len < MAX_BUFFER_SIZE;
|
||||
if let Some(task) = self.task.take() {
|
||||
task.notify()
|
||||
task.wake()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -187,34 +192,28 @@ impl Inner {
|
||||
self.len
|
||||
}
|
||||
|
||||
fn readany(&mut self) -> Poll<Option<Bytes>, PayloadError> {
|
||||
fn readany(
|
||||
&mut self,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, PayloadError>>> {
|
||||
if let Some(data) = self.items.pop_front() {
|
||||
self.len -= data.len();
|
||||
self.need_read = self.len < MAX_BUFFER_SIZE;
|
||||
|
||||
if self.need_read && self.task.is_none() && !self.eof {
|
||||
self.task = Some(current_task());
|
||||
if self.need_read && !self.eof {
|
||||
self.task.register(cx.waker());
|
||||
}
|
||||
if let Some(task) = self.io_task.take() {
|
||||
task.notify()
|
||||
}
|
||||
Ok(Async::Ready(Some(data)))
|
||||
self.io_task.wake();
|
||||
Poll::Ready(Some(Ok(data)))
|
||||
} else if let Some(err) = self.err.take() {
|
||||
Err(err)
|
||||
Poll::Ready(Some(Err(err)))
|
||||
} else if self.eof {
|
||||
Ok(Async::Ready(None))
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
self.need_read = true;
|
||||
#[cfg(not(test))]
|
||||
{
|
||||
if self.task.is_none() {
|
||||
self.task = Some(current_task());
|
||||
}
|
||||
if let Some(task) = self.io_task.take() {
|
||||
task.notify()
|
||||
}
|
||||
}
|
||||
Ok(Async::NotReady)
|
||||
self.task.register(cx.waker());
|
||||
self.io_task.wake();
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
|
||||
@@ -227,28 +226,19 @@ impl Inner {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use actix_rt::Runtime;
|
||||
use futures::future::{lazy, result};
|
||||
use futures_util::future::poll_fn;
|
||||
|
||||
#[test]
|
||||
fn test_unread_data() {
|
||||
Runtime::new()
|
||||
.unwrap()
|
||||
.block_on(lazy(|| {
|
||||
let (_, mut payload) = Payload::create(false);
|
||||
#[actix_rt::test]
|
||||
async fn test_unread_data() {
|
||||
let (_, mut payload) = Payload::create(false);
|
||||
|
||||
payload.unread_data(Bytes::from("data"));
|
||||
assert!(!payload.is_empty());
|
||||
assert_eq!(payload.len(), 4);
|
||||
payload.unread_data(Bytes::from("data"));
|
||||
assert!(!payload.is_empty());
|
||||
assert_eq!(payload.len(), 4);
|
||||
|
||||
assert_eq!(
|
||||
Async::Ready(Some(Bytes::from("data"))),
|
||||
payload.poll().ok().unwrap()
|
||||
);
|
||||
|
||||
let res: Result<(), ()> = Ok(());
|
||||
result(res)
|
||||
}))
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
Bytes::from("data"),
|
||||
poll_fn(|cx| payload.readany(cx)).await.unwrap().unwrap()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,112 +1,291 @@
|
||||
use std::fmt;
|
||||
use std::future::Future;
|
||||
use std::marker::PhantomData;
|
||||
use std::pin::Pin;
|
||||
use std::rc::Rc;
|
||||
use std::task::{Context, Poll};
|
||||
use std::{fmt, net};
|
||||
|
||||
use actix_codec::{AsyncRead, AsyncWrite, Framed};
|
||||
use actix_server_config::{Io, ServerConfig as SrvConfig};
|
||||
use actix_service::{IntoNewService, NewService, Service};
|
||||
use actix_utils::cloneable::CloneableService;
|
||||
use futures::future::{ok, FutureResult};
|
||||
use futures::{try_ready, Async, Future, IntoFuture, Poll, Stream};
|
||||
use actix_rt::net::TcpStream;
|
||||
use actix_service::{pipeline_factory, IntoServiceFactory, Service, ServiceFactory};
|
||||
use futures_core::ready;
|
||||
use futures_util::future::{ok, Ready};
|
||||
|
||||
use crate::body::MessageBody;
|
||||
use crate::config::{KeepAlive, ServiceConfig};
|
||||
use crate::cloneable::CloneableService;
|
||||
use crate::config::ServiceConfig;
|
||||
use crate::error::{DispatchError, Error, ParseError};
|
||||
use crate::helpers::DataFactory;
|
||||
use crate::request::Request;
|
||||
use crate::response::Response;
|
||||
use crate::{ConnectCallback, Extensions};
|
||||
|
||||
use super::codec::Codec;
|
||||
use super::dispatcher::Dispatcher;
|
||||
use super::{ExpectHandler, Message};
|
||||
use super::{ExpectHandler, Message, UpgradeHandler};
|
||||
|
||||
/// `NewService` implementation for HTTP1 transport
|
||||
pub struct H1Service<T, P, S, B, X = ExpectHandler> {
|
||||
/// `ServiceFactory` implementation for HTTP1 transport
|
||||
pub struct H1Service<T, S, B, X = ExpectHandler, U = UpgradeHandler<T>> {
|
||||
srv: S,
|
||||
cfg: ServiceConfig,
|
||||
expect: X,
|
||||
_t: PhantomData<(T, P, B)>,
|
||||
upgrade: Option<U>,
|
||||
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
|
||||
on_connect_ext: Option<Rc<ConnectCallback<T>>>,
|
||||
_t: PhantomData<(T, B)>,
|
||||
}
|
||||
|
||||
impl<T, P, S, B> H1Service<T, P, S, B>
|
||||
impl<T, S, B> H1Service<T, S, B>
|
||||
where
|
||||
S: NewService<SrvConfig, Request = Request>,
|
||||
S: ServiceFactory<Config = (), Request = Request>,
|
||||
S::Error: Into<Error>,
|
||||
S::InitError: fmt::Debug,
|
||||
S::Response: Into<Response<B>>,
|
||||
B: MessageBody,
|
||||
{
|
||||
/// Create new `HttpService` instance with default config.
|
||||
pub fn new<F: IntoNewService<S, SrvConfig>>(service: F) -> Self {
|
||||
let cfg = ServiceConfig::new(KeepAlive::Timeout(5), 5000, 0);
|
||||
|
||||
H1Service {
|
||||
cfg,
|
||||
srv: service.into_new_service(),
|
||||
expect: ExpectHandler,
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create new `HttpService` instance with config.
|
||||
pub fn with_config<F: IntoNewService<S, SrvConfig>>(
|
||||
pub(crate) fn with_config<F: IntoServiceFactory<S>>(
|
||||
cfg: ServiceConfig,
|
||||
service: F,
|
||||
) -> Self {
|
||||
H1Service {
|
||||
cfg,
|
||||
srv: service.into_new_service(),
|
||||
srv: service.into_factory(),
|
||||
expect: ExpectHandler,
|
||||
upgrade: None,
|
||||
on_connect: None,
|
||||
on_connect_ext: None,
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, P, S, B, X> H1Service<T, P, S, B, X>
|
||||
impl<S, B, X, U> H1Service<TcpStream, S, B, X, U>
|
||||
where
|
||||
S: NewService<SrvConfig, Request = Request>,
|
||||
S: ServiceFactory<Config = (), Request = Request>,
|
||||
S::Error: Into<Error>,
|
||||
S::InitError: fmt::Debug,
|
||||
S::Response: Into<Response<B>>,
|
||||
B: MessageBody,
|
||||
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
|
||||
X::Error: Into<Error>,
|
||||
X::InitError: fmt::Debug,
|
||||
U: ServiceFactory<
|
||||
Config = (),
|
||||
Request = (Request, Framed<TcpStream, Codec>),
|
||||
Response = (),
|
||||
>,
|
||||
U::Error: fmt::Display + Into<Error>,
|
||||
U::InitError: fmt::Debug,
|
||||
{
|
||||
/// Create simple tcp stream service
|
||||
pub fn tcp(
|
||||
self,
|
||||
) -> impl ServiceFactory<
|
||||
Config = (),
|
||||
Request = TcpStream,
|
||||
Response = (),
|
||||
Error = DispatchError,
|
||||
InitError = (),
|
||||
> {
|
||||
pipeline_factory(|io: TcpStream| {
|
||||
let peer_addr = io.peer_addr().ok();
|
||||
ok((io, peer_addr))
|
||||
})
|
||||
.and_then(self)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "openssl")]
|
||||
mod openssl {
|
||||
use super::*;
|
||||
|
||||
use actix_tls::openssl::{Acceptor, SslAcceptor, SslStream};
|
||||
use actix_tls::{openssl::HandshakeError, TlsError};
|
||||
|
||||
impl<S, B, X, U> H1Service<SslStream<TcpStream>, S, B, X, U>
|
||||
where
|
||||
S: ServiceFactory<Config = (), Request = Request>,
|
||||
S::Error: Into<Error>,
|
||||
S::InitError: fmt::Debug,
|
||||
S::Response: Into<Response<B>>,
|
||||
B: MessageBody,
|
||||
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
|
||||
X::Error: Into<Error>,
|
||||
X::InitError: fmt::Debug,
|
||||
U: ServiceFactory<
|
||||
Config = (),
|
||||
Request = (Request, Framed<SslStream<TcpStream>, Codec>),
|
||||
Response = (),
|
||||
>,
|
||||
U::Error: fmt::Display + Into<Error>,
|
||||
U::InitError: fmt::Debug,
|
||||
{
|
||||
/// Create openssl based service
|
||||
pub fn openssl(
|
||||
self,
|
||||
acceptor: SslAcceptor,
|
||||
) -> impl ServiceFactory<
|
||||
Config = (),
|
||||
Request = TcpStream,
|
||||
Response = (),
|
||||
Error = TlsError<HandshakeError<TcpStream>, DispatchError>,
|
||||
InitError = (),
|
||||
> {
|
||||
pipeline_factory(
|
||||
Acceptor::new(acceptor)
|
||||
.map_err(TlsError::Tls)
|
||||
.map_init_err(|_| panic!()),
|
||||
)
|
||||
.and_then(|io: SslStream<TcpStream>| {
|
||||
let peer_addr = io.get_ref().peer_addr().ok();
|
||||
ok((io, peer_addr))
|
||||
})
|
||||
.and_then(self.map_err(TlsError::Service))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "rustls")]
|
||||
mod rustls {
|
||||
use super::*;
|
||||
use actix_tls::rustls::{Acceptor, ServerConfig, TlsStream};
|
||||
use actix_tls::TlsError;
|
||||
use std::{fmt, io};
|
||||
|
||||
impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U>
|
||||
where
|
||||
S: ServiceFactory<Config = (), Request = Request>,
|
||||
S::Error: Into<Error>,
|
||||
S::InitError: fmt::Debug,
|
||||
S::Response: Into<Response<B>>,
|
||||
B: MessageBody,
|
||||
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
|
||||
X::Error: Into<Error>,
|
||||
X::InitError: fmt::Debug,
|
||||
U: ServiceFactory<
|
||||
Config = (),
|
||||
Request = (Request, Framed<TlsStream<TcpStream>, Codec>),
|
||||
Response = (),
|
||||
>,
|
||||
U::Error: fmt::Display + Into<Error>,
|
||||
U::InitError: fmt::Debug,
|
||||
{
|
||||
/// Create rustls based service
|
||||
pub fn rustls(
|
||||
self,
|
||||
config: ServerConfig,
|
||||
) -> impl ServiceFactory<
|
||||
Config = (),
|
||||
Request = TcpStream,
|
||||
Response = (),
|
||||
Error = TlsError<io::Error, DispatchError>,
|
||||
InitError = (),
|
||||
> {
|
||||
pipeline_factory(
|
||||
Acceptor::new(config)
|
||||
.map_err(TlsError::Tls)
|
||||
.map_init_err(|_| panic!()),
|
||||
)
|
||||
.and_then(|io: TlsStream<TcpStream>| {
|
||||
let peer_addr = io.get_ref().0.peer_addr().ok();
|
||||
ok((io, peer_addr))
|
||||
})
|
||||
.and_then(self.map_err(TlsError::Service))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S, B, X, U> H1Service<T, S, B, X, U>
|
||||
where
|
||||
S: ServiceFactory<Config = (), Request = Request>,
|
||||
S::Error: Into<Error>,
|
||||
S::Response: Into<Response<B>>,
|
||||
S::InitError: fmt::Debug,
|
||||
B: MessageBody,
|
||||
{
|
||||
pub fn expect<U>(self, expect: U) -> H1Service<T, P, S, B, U>
|
||||
pub fn expect<X1>(self, expect: X1) -> H1Service<T, S, B, X1, U>
|
||||
where
|
||||
U: NewService<Request = Request, Response = Request>,
|
||||
U::Error: Into<Error>,
|
||||
U::InitError: fmt::Debug,
|
||||
X1: ServiceFactory<Request = Request, Response = Request>,
|
||||
X1::Error: Into<Error>,
|
||||
X1::InitError: fmt::Debug,
|
||||
{
|
||||
H1Service {
|
||||
expect,
|
||||
cfg: self.cfg,
|
||||
srv: self.srv,
|
||||
upgrade: self.upgrade,
|
||||
on_connect: self.on_connect,
|
||||
on_connect_ext: self.on_connect_ext,
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn upgrade<U1>(self, upgrade: Option<U1>) -> H1Service<T, S, B, X, U1>
|
||||
where
|
||||
U1: ServiceFactory<Request = (Request, Framed<T, Codec>), Response = ()>,
|
||||
U1::Error: fmt::Display,
|
||||
U1::InitError: fmt::Debug,
|
||||
{
|
||||
H1Service {
|
||||
upgrade,
|
||||
cfg: self.cfg,
|
||||
srv: self.srv,
|
||||
expect: self.expect,
|
||||
on_connect: self.on_connect,
|
||||
on_connect_ext: self.on_connect_ext,
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set on connect callback.
|
||||
pub(crate) fn on_connect(
|
||||
mut self,
|
||||
f: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
|
||||
) -> Self {
|
||||
self.on_connect = f;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set on connect callback.
|
||||
pub(crate) fn on_connect_ext(mut self, f: Option<Rc<ConnectCallback<T>>>) -> Self {
|
||||
self.on_connect_ext = f;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, P, S, B, X> NewService<SrvConfig> for H1Service<T, P, S, B, X>
|
||||
impl<T, S, B, X, U> ServiceFactory for H1Service<T, S, B, X, U>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite,
|
||||
S: NewService<SrvConfig, Request = Request>,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
S: ServiceFactory<Config = (), Request = Request>,
|
||||
S::Error: Into<Error>,
|
||||
S::Response: Into<Response<B>>,
|
||||
S::InitError: fmt::Debug,
|
||||
B: MessageBody,
|
||||
X: NewService<Request = Request, Response = Request>,
|
||||
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
|
||||
X::Error: Into<Error>,
|
||||
X::InitError: fmt::Debug,
|
||||
U: ServiceFactory<Config = (), Request = (Request, Framed<T, Codec>), Response = ()>,
|
||||
U::Error: fmt::Display + Into<Error>,
|
||||
U::InitError: fmt::Debug,
|
||||
{
|
||||
type Request = Io<T, P>;
|
||||
type Config = ();
|
||||
type Request = (T, Option<net::SocketAddr>);
|
||||
type Response = ();
|
||||
type Error = DispatchError;
|
||||
type InitError = ();
|
||||
type Service = H1ServiceHandler<T, P, S::Service, B, X::Service>;
|
||||
type Future = H1ServiceResponse<T, P, S, B, X>;
|
||||
type Service = H1ServiceHandler<T, S::Service, B, X::Service, U::Service>;
|
||||
type Future = H1ServiceResponse<T, S, B, X, U>;
|
||||
|
||||
fn new_service(&self, cfg: &SrvConfig) -> Self::Future {
|
||||
fn new_service(&self, _: ()) -> Self::Future {
|
||||
H1ServiceResponse {
|
||||
fut: self.srv.new_service(cfg).into_future(),
|
||||
fut_ex: Some(self.expect.new_service(&())),
|
||||
fut: self.srv.new_service(()),
|
||||
fut_ex: Some(self.expect.new_service(())),
|
||||
fut_upg: self.upgrade.as_ref().map(|f| f.new_service(())),
|
||||
expect: None,
|
||||
upgrade: None,
|
||||
on_connect: self.on_connect.clone(),
|
||||
on_connect_ext: self.on_connect_ext.clone(),
|
||||
cfg: Some(self.cfg.clone()),
|
||||
_t: PhantomData,
|
||||
}
|
||||
@@ -114,67 +293,103 @@ where
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
pub struct H1ServiceResponse<T, P, S, B, X>
|
||||
#[pin_project::pin_project]
|
||||
pub struct H1ServiceResponse<T, S, B, X, U>
|
||||
where
|
||||
S: NewService<SrvConfig, Request = Request>,
|
||||
S: ServiceFactory<Request = Request>,
|
||||
S::Error: Into<Error>,
|
||||
S::InitError: fmt::Debug,
|
||||
X: NewService<Request = Request, Response = Request>,
|
||||
X: ServiceFactory<Request = Request, Response = Request>,
|
||||
X::Error: Into<Error>,
|
||||
X::InitError: fmt::Debug,
|
||||
U: ServiceFactory<Request = (Request, Framed<T, Codec>), Response = ()>,
|
||||
U::Error: fmt::Display,
|
||||
U::InitError: fmt::Debug,
|
||||
{
|
||||
#[pin]
|
||||
fut: S::Future,
|
||||
#[pin]
|
||||
fut_ex: Option<X::Future>,
|
||||
#[pin]
|
||||
fut_upg: Option<U::Future>,
|
||||
expect: Option<X::Service>,
|
||||
upgrade: Option<U::Service>,
|
||||
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
|
||||
on_connect_ext: Option<Rc<ConnectCallback<T>>>,
|
||||
cfg: Option<ServiceConfig>,
|
||||
_t: PhantomData<(T, P, B)>,
|
||||
_t: PhantomData<(T, B)>,
|
||||
}
|
||||
|
||||
impl<T, P, S, B, X> Future for H1ServiceResponse<T, P, S, B, X>
|
||||
impl<T, S, B, X, U> Future for H1ServiceResponse<T, S, B, X, U>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite,
|
||||
S: NewService<SrvConfig, Request = Request>,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
S: ServiceFactory<Request = Request>,
|
||||
S::Error: Into<Error>,
|
||||
S::Response: Into<Response<B>>,
|
||||
S::InitError: fmt::Debug,
|
||||
B: MessageBody,
|
||||
X: NewService<Request = Request, Response = Request>,
|
||||
X: ServiceFactory<Request = Request, Response = Request>,
|
||||
X::Error: Into<Error>,
|
||||
X::InitError: fmt::Debug,
|
||||
U: ServiceFactory<Request = (Request, Framed<T, Codec>), Response = ()>,
|
||||
U::Error: fmt::Display,
|
||||
U::InitError: fmt::Debug,
|
||||
{
|
||||
type Item = H1ServiceHandler<T, P, S::Service, B, X::Service>;
|
||||
type Error = ();
|
||||
type Output = Result<H1ServiceHandler<T, S::Service, B, X::Service, U::Service>, ()>;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
if let Some(ref mut fut) = self.fut_ex {
|
||||
let expect = try_ready!(fut
|
||||
.poll()
|
||||
.map_err(|e| log::error!("Init http service error: {:?}", e)));
|
||||
self.expect = Some(expect);
|
||||
self.fut_ex.take();
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let mut this = self.as_mut().project();
|
||||
|
||||
if let Some(fut) = this.fut_ex.as_pin_mut() {
|
||||
let expect = ready!(fut
|
||||
.poll(cx)
|
||||
.map_err(|e| log::error!("Init http service error: {:?}", e)))?;
|
||||
this = self.as_mut().project();
|
||||
*this.expect = Some(expect);
|
||||
this.fut_ex.set(None);
|
||||
}
|
||||
|
||||
let service = try_ready!(self
|
||||
if let Some(fut) = this.fut_upg.as_pin_mut() {
|
||||
let upgrade = ready!(fut
|
||||
.poll(cx)
|
||||
.map_err(|e| log::error!("Init http service error: {:?}", e)))?;
|
||||
this = self.as_mut().project();
|
||||
*this.upgrade = Some(upgrade);
|
||||
this.fut_ex.set(None);
|
||||
}
|
||||
|
||||
let result = ready!(this
|
||||
.fut
|
||||
.poll()
|
||||
.poll(cx)
|
||||
.map_err(|e| log::error!("Init http service error: {:?}", e)));
|
||||
Ok(Async::Ready(H1ServiceHandler::new(
|
||||
self.cfg.take().unwrap(),
|
||||
service,
|
||||
self.expect.take().unwrap(),
|
||||
)))
|
||||
|
||||
Poll::Ready(result.map(|service| {
|
||||
let this = self.as_mut().project();
|
||||
|
||||
H1ServiceHandler::new(
|
||||
this.cfg.take().unwrap(),
|
||||
service,
|
||||
this.expect.take().unwrap(),
|
||||
this.upgrade.take(),
|
||||
this.on_connect.clone(),
|
||||
this.on_connect_ext.clone(),
|
||||
)
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
/// `Service` implementation for HTTP1 transport
|
||||
pub struct H1ServiceHandler<T, P, S, B, X> {
|
||||
/// `Service` implementation for HTTP/1 transport
|
||||
pub struct H1ServiceHandler<T, S: Service, B, X: Service, U: Service> {
|
||||
srv: CloneableService<S>,
|
||||
expect: CloneableService<X>,
|
||||
upgrade: Option<CloneableService<U>>,
|
||||
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
|
||||
on_connect_ext: Option<Rc<ConnectCallback<T>>>,
|
||||
cfg: ServiceConfig,
|
||||
_t: PhantomData<(T, P, B)>,
|
||||
_t: PhantomData<(T, B)>,
|
||||
}
|
||||
|
||||
impl<T, P, S, B, X> H1ServiceHandler<T, P, S, B, X>
|
||||
impl<T, S, B, X, U> H1ServiceHandler<T, S, B, X, U>
|
||||
where
|
||||
S: Service<Request = Request>,
|
||||
S::Error: Into<Error>,
|
||||
@@ -182,36 +397,50 @@ where
|
||||
B: MessageBody,
|
||||
X: Service<Request = Request, Response = Request>,
|
||||
X::Error: Into<Error>,
|
||||
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
|
||||
U::Error: fmt::Display,
|
||||
{
|
||||
fn new(cfg: ServiceConfig, srv: S, expect: X) -> H1ServiceHandler<T, P, S, B, X> {
|
||||
fn new(
|
||||
cfg: ServiceConfig,
|
||||
srv: S,
|
||||
expect: X,
|
||||
upgrade: Option<U>,
|
||||
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
|
||||
on_connect_ext: Option<Rc<ConnectCallback<T>>>,
|
||||
) -> H1ServiceHandler<T, S, B, X, U> {
|
||||
H1ServiceHandler {
|
||||
srv: CloneableService::new(srv),
|
||||
expect: CloneableService::new(expect),
|
||||
upgrade: upgrade.map(CloneableService::new),
|
||||
cfg,
|
||||
on_connect,
|
||||
on_connect_ext,
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, P, S, B, X> Service for H1ServiceHandler<T, P, S, B, X>
|
||||
impl<T, S, B, X, U> Service for H1ServiceHandler<T, S, B, X, U>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
S: Service<Request = Request>,
|
||||
S::Error: Into<Error>,
|
||||
S::Response: Into<Response<B>>,
|
||||
B: MessageBody,
|
||||
X: Service<Request = Request, Response = Request>,
|
||||
X::Error: Into<Error>,
|
||||
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
|
||||
U::Error: fmt::Display + Into<Error>,
|
||||
{
|
||||
type Request = Io<T, P>;
|
||||
type Request = (T, Option<net::SocketAddr>);
|
||||
type Response = ();
|
||||
type Error = DispatchError;
|
||||
type Future = Dispatcher<T, S, B, X>;
|
||||
type Future = Dispatcher<T, S, B, X, U>;
|
||||
|
||||
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
|
||||
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
let ready = self
|
||||
.expect
|
||||
.poll_ready()
|
||||
.poll_ready(cx)
|
||||
.map_err(|e| {
|
||||
let e = e.into();
|
||||
log::error!("Http service readiness error: {:?}", e);
|
||||
@@ -221,7 +450,7 @@ where
|
||||
|
||||
let ready = self
|
||||
.srv
|
||||
.poll_ready()
|
||||
.poll_ready(cx)
|
||||
.map_err(|e| {
|
||||
let e = e.into();
|
||||
log::error!("Http service readiness error: {:?}", e);
|
||||
@@ -230,33 +459,58 @@ where
|
||||
.is_ready()
|
||||
&& ready;
|
||||
|
||||
if ready {
|
||||
Ok(Async::Ready(()))
|
||||
let ready = if let Some(ref mut upg) = self.upgrade {
|
||||
upg.poll_ready(cx)
|
||||
.map_err(|e| {
|
||||
let e = e.into();
|
||||
log::error!("Http service readiness error: {:?}", e);
|
||||
DispatchError::Service(e)
|
||||
})?
|
||||
.is_ready()
|
||||
&& ready
|
||||
} else {
|
||||
Ok(Async::NotReady)
|
||||
ready
|
||||
};
|
||||
|
||||
if ready {
|
||||
Poll::Ready(Ok(()))
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
|
||||
fn call(&mut self, req: Self::Request) -> Self::Future {
|
||||
fn call(&mut self, (io, addr): Self::Request) -> Self::Future {
|
||||
let deprecated_on_connect = self.on_connect.as_ref().map(|handler| handler(&io));
|
||||
|
||||
let mut connect_extensions = Extensions::new();
|
||||
if let Some(ref handler) = self.on_connect_ext {
|
||||
// run on_connect_ext callback, populating connect extensions
|
||||
handler(&io, &mut connect_extensions);
|
||||
}
|
||||
|
||||
Dispatcher::new(
|
||||
req.into_parts().0,
|
||||
io,
|
||||
self.cfg.clone(),
|
||||
self.srv.clone(),
|
||||
self.expect.clone(),
|
||||
self.upgrade.clone(),
|
||||
deprecated_on_connect,
|
||||
connect_extensions,
|
||||
addr,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// `NewService` implementation for `OneRequestService` service
|
||||
/// `ServiceFactory` implementation for `OneRequestService` service
|
||||
#[derive(Default)]
|
||||
pub struct OneRequest<T, P> {
|
||||
pub struct OneRequest<T> {
|
||||
config: ServiceConfig,
|
||||
_t: PhantomData<(T, P)>,
|
||||
_t: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<T, P> OneRequest<T, P>
|
||||
impl<T> OneRequest<T>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
/// Create new `H1SimpleService` instance.
|
||||
pub fn new() -> Self {
|
||||
@@ -267,80 +521,82 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, P> NewService<SrvConfig> for OneRequest<T, P>
|
||||
impl<T> ServiceFactory for OneRequest<T>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
type Request = Io<T, P>;
|
||||
type Config = ();
|
||||
type Request = T;
|
||||
type Response = (Request, Framed<T, Codec>);
|
||||
type Error = ParseError;
|
||||
type InitError = ();
|
||||
type Service = OneRequestService<T, P>;
|
||||
type Future = FutureResult<Self::Service, Self::InitError>;
|
||||
type Service = OneRequestService<T>;
|
||||
type Future = Ready<Result<Self::Service, Self::InitError>>;
|
||||
|
||||
fn new_service(&self, _: &SrvConfig) -> Self::Future {
|
||||
fn new_service(&self, _: ()) -> Self::Future {
|
||||
ok(OneRequestService {
|
||||
config: self.config.clone(),
|
||||
_t: PhantomData,
|
||||
config: self.config.clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// `Service` implementation for HTTP1 transport. Reads one request and returns
|
||||
/// request and framed object.
|
||||
pub struct OneRequestService<T, P> {
|
||||
pub struct OneRequestService<T> {
|
||||
_t: PhantomData<T>,
|
||||
config: ServiceConfig,
|
||||
_t: PhantomData<(T, P)>,
|
||||
}
|
||||
|
||||
impl<T, P> Service for OneRequestService<T, P>
|
||||
impl<T> Service for OneRequestService<T>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
type Request = Io<T, P>;
|
||||
type Request = T;
|
||||
type Response = (Request, Framed<T, Codec>);
|
||||
type Error = ParseError;
|
||||
type Future = OneRequestServiceResponse<T>;
|
||||
|
||||
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
|
||||
Ok(Async::Ready(()))
|
||||
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, req: Self::Request) -> Self::Future {
|
||||
OneRequestServiceResponse {
|
||||
framed: Some(Framed::new(
|
||||
req.into_parts().0,
|
||||
Codec::new(self.config.clone()),
|
||||
)),
|
||||
framed: Some(Framed::new(req, Codec::new(self.config.clone()))),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
#[pin_project::pin_project]
|
||||
pub struct OneRequestServiceResponse<T>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
#[pin]
|
||||
framed: Option<Framed<T, Codec>>,
|
||||
}
|
||||
|
||||
impl<T> Future for OneRequestServiceResponse<T>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
type Item = (Request, Framed<T, Codec>);
|
||||
type Error = ParseError;
|
||||
type Output = Result<(Request, Framed<T, Codec>), ParseError>;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
match self.framed.as_mut().unwrap().poll()? {
|
||||
Async::Ready(Some(req)) => match req {
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let this = self.as_mut().project();
|
||||
|
||||
match ready!(this.framed.as_pin_mut().unwrap().next_item(cx)) {
|
||||
Some(Ok(req)) => match req {
|
||||
Message::Item(req) => {
|
||||
Ok(Async::Ready((req, self.framed.take().unwrap())))
|
||||
let mut this = self.as_mut().project();
|
||||
Poll::Ready(Ok((req, this.framed.take().unwrap())))
|
||||
}
|
||||
Message::Chunk(_) => unreachable!("Something is wrong"),
|
||||
},
|
||||
Async::Ready(None) => Err(ParseError::Incomplete),
|
||||
Async::NotReady => Ok(Async::NotReady),
|
||||
Some(Err(err)) => Poll::Ready(Err(err)),
|
||||
None => Poll::Ready(Err(ParseError::Incomplete)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
41
actix-http/src/h1/upgrade.rs
Normal file
41
actix-http/src/h1/upgrade.rs
Normal file
@@ -0,0 +1,41 @@
|
||||
use std::marker::PhantomData;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use actix_codec::Framed;
|
||||
use actix_service::{Service, ServiceFactory};
|
||||
use futures_util::future::Ready;
|
||||
|
||||
use crate::error::Error;
|
||||
use crate::h1::Codec;
|
||||
use crate::request::Request;
|
||||
|
||||
pub struct UpgradeHandler<T>(PhantomData<T>);
|
||||
|
||||
impl<T> ServiceFactory for UpgradeHandler<T> {
|
||||
type Config = ();
|
||||
type Request = (Request, Framed<T, Codec>);
|
||||
type Response = ();
|
||||
type Error = Error;
|
||||
type Service = UpgradeHandler<T>;
|
||||
type InitError = Error;
|
||||
type Future = Ready<Result<Self::Service, Self::InitError>>;
|
||||
|
||||
fn new_service(&self, _: ()) -> Self::Future {
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Service for UpgradeHandler<T> {
|
||||
type Request = (Request, Framed<T, Codec>);
|
||||
type Response = ();
|
||||
type Error = Error;
|
||||
type Future = Ready<Result<Self::Response, Self::Error>>;
|
||||
|
||||
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, _: Self::Request) -> Self::Future {
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
||||
115
actix-http/src/h1/utils.rs
Normal file
115
actix-http/src/h1/utils.rs
Normal file
@@ -0,0 +1,115 @@
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use actix_codec::{AsyncRead, AsyncWrite, Framed};
|
||||
|
||||
use crate::body::{BodySize, MessageBody, ResponseBody};
|
||||
use crate::error::Error;
|
||||
use crate::h1::{Codec, Message};
|
||||
use crate::response::Response;
|
||||
|
||||
/// Send HTTP/1 response
|
||||
#[pin_project::pin_project]
|
||||
pub struct SendResponse<T, B> {
|
||||
res: Option<Message<(Response<()>, BodySize)>>,
|
||||
#[pin]
|
||||
body: Option<ResponseBody<B>>,
|
||||
#[pin]
|
||||
framed: Option<Framed<T, Codec>>,
|
||||
}
|
||||
|
||||
impl<T, B> SendResponse<T, B>
|
||||
where
|
||||
B: MessageBody,
|
||||
{
|
||||
pub fn new(framed: Framed<T, Codec>, response: Response<B>) -> Self {
|
||||
let (res, body) = response.into_parts();
|
||||
|
||||
SendResponse {
|
||||
res: Some((res, body.size()).into()),
|
||||
body: Some(body),
|
||||
framed: Some(framed),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, B> Future for SendResponse<T, B>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
B: MessageBody + Unpin,
|
||||
{
|
||||
type Output = Result<Framed<T, Codec>, Error>;
|
||||
|
||||
// TODO: rethink if we need loops in polls
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let mut this = self.as_mut().project();
|
||||
|
||||
let mut body_done = this.body.is_none();
|
||||
loop {
|
||||
let mut body_ready = !body_done;
|
||||
|
||||
// send body
|
||||
if this.res.is_none() && body_ready {
|
||||
while body_ready
|
||||
&& !body_done
|
||||
&& !this
|
||||
.framed
|
||||
.as_ref()
|
||||
.as_pin_ref()
|
||||
.unwrap()
|
||||
.is_write_buf_full()
|
||||
{
|
||||
match this.body.as_mut().as_pin_mut().unwrap().poll_next(cx)? {
|
||||
Poll::Ready(item) => {
|
||||
// body is done when item is None
|
||||
body_done = item.is_none();
|
||||
if body_done {
|
||||
let _ = this.body.take();
|
||||
}
|
||||
let framed = this.framed.as_mut().as_pin_mut().unwrap();
|
||||
framed.write(Message::Chunk(item))?;
|
||||
}
|
||||
Poll::Pending => body_ready = false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let framed = this.framed.as_mut().as_pin_mut().unwrap();
|
||||
|
||||
// flush write buffer
|
||||
if !framed.is_write_buf_empty() {
|
||||
match framed.flush(cx)? {
|
||||
Poll::Ready(_) => {
|
||||
if body_ready {
|
||||
continue;
|
||||
} else {
|
||||
return Poll::Pending;
|
||||
}
|
||||
}
|
||||
Poll::Pending => return Poll::Pending,
|
||||
}
|
||||
}
|
||||
|
||||
// send response
|
||||
if let Some(res) = this.res.take() {
|
||||
framed.write(res)?;
|
||||
continue;
|
||||
}
|
||||
|
||||
if !body_done {
|
||||
if body_ready {
|
||||
continue;
|
||||
} else {
|
||||
return Poll::Pending;
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
let framed = this.framed.take().unwrap();
|
||||
|
||||
Poll::Ready(Ok(framed))
|
||||
}
|
||||
}
|
||||
@@ -1,42 +1,45 @@
|
||||
use std::collections::VecDeque;
|
||||
use std::convert::TryFrom;
|
||||
use std::future::Future;
|
||||
use std::marker::PhantomData;
|
||||
use std::time::Instant;
|
||||
use std::{fmt, mem};
|
||||
use std::net;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use actix_codec::{AsyncRead, AsyncWrite};
|
||||
use actix_rt::time::{Delay, Instant};
|
||||
use actix_service::Service;
|
||||
use actix_utils::cloneable::CloneableService;
|
||||
use bitflags::bitflags;
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use futures::{try_ready, Async, Future, Poll, Sink, Stream};
|
||||
use h2::server::{Connection, SendResponse};
|
||||
use h2::{RecvStream, SendStream};
|
||||
use http::header::{
|
||||
HeaderValue, ACCEPT_ENCODING, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING,
|
||||
};
|
||||
use http::HttpTryFrom;
|
||||
use log::{debug, error, trace};
|
||||
use tokio_timer::Delay;
|
||||
use h2::SendStream;
|
||||
use http::header::{HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING};
|
||||
use log::{error, trace};
|
||||
|
||||
use crate::body::{Body, BodySize, MessageBody, ResponseBody};
|
||||
use crate::body::{BodySize, MessageBody, ResponseBody};
|
||||
use crate::cloneable::CloneableService;
|
||||
use crate::config::ServiceConfig;
|
||||
use crate::error::{DispatchError, Error, ParseError, PayloadError, ResponseError};
|
||||
use crate::error::{DispatchError, Error};
|
||||
use crate::helpers::DataFactory;
|
||||
use crate::httpmessage::HttpMessage;
|
||||
use crate::message::ResponseHead;
|
||||
use crate::payload::Payload;
|
||||
use crate::request::Request;
|
||||
use crate::response::Response;
|
||||
use crate::Extensions;
|
||||
|
||||
const CHUNK_SIZE: usize = 16_384;
|
||||
|
||||
/// Dispatcher for HTTP/2 protocol
|
||||
pub struct Dispatcher<
|
||||
T: AsyncRead + AsyncWrite,
|
||||
S: Service<Request = Request>,
|
||||
B: MessageBody,
|
||||
> {
|
||||
#[pin_project::pin_project]
|
||||
pub struct Dispatcher<T, S: Service<Request = Request>, B: MessageBody>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
service: CloneableService<S>,
|
||||
connection: Connection<T, Bytes>,
|
||||
on_connect: Option<Box<dyn DataFactory>>,
|
||||
on_connect_data: Extensions,
|
||||
config: ServiceConfig,
|
||||
peer_addr: Option<net::SocketAddr>,
|
||||
ka_expire: Instant,
|
||||
ka_timer: Option<Delay>,
|
||||
_t: PhantomData<B>,
|
||||
@@ -44,18 +47,21 @@ pub struct Dispatcher<
|
||||
|
||||
impl<T, S, B> Dispatcher<T, S, B>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
S: Service<Request = Request>,
|
||||
S::Error: Into<Error>,
|
||||
S::Future: 'static,
|
||||
// S::Future: 'static,
|
||||
S::Response: Into<Response<B>>,
|
||||
B: MessageBody + 'static,
|
||||
B: MessageBody,
|
||||
{
|
||||
pub fn new(
|
||||
pub(crate) fn new(
|
||||
service: CloneableService<S>,
|
||||
connection: Connection<T, Bytes>,
|
||||
on_connect: Option<Box<dyn DataFactory>>,
|
||||
on_connect_data: Extensions,
|
||||
config: ServiceConfig,
|
||||
timeout: Option<Delay>,
|
||||
peer_addr: Option<net::SocketAddr>,
|
||||
) -> Self {
|
||||
// let keepalive = config.keep_alive_enabled();
|
||||
// let flags = if keepalive {
|
||||
@@ -76,9 +82,12 @@ where
|
||||
Dispatcher {
|
||||
service,
|
||||
config,
|
||||
peer_addr,
|
||||
connection,
|
||||
on_connect,
|
||||
on_connect_data,
|
||||
ka_expire,
|
||||
ka_timer,
|
||||
connection,
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
@@ -86,77 +95,104 @@ where
|
||||
|
||||
impl<T, S, B> Future for Dispatcher<T, S, B>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
S: Service<Request = Request>,
|
||||
S::Error: Into<Error>,
|
||||
S::Error: Into<Error> + 'static,
|
||||
S::Future: 'static,
|
||||
S::Response: Into<Response<B>>,
|
||||
S::Response: Into<Response<B>> + 'static,
|
||||
B: MessageBody + 'static,
|
||||
{
|
||||
type Item = ();
|
||||
type Error = DispatchError;
|
||||
type Output = Result<(), DispatchError>;
|
||||
|
||||
#[inline]
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let this = self.get_mut();
|
||||
|
||||
loop {
|
||||
match self.connection.poll()? {
|
||||
Async::Ready(None) => return Ok(Async::Ready(())),
|
||||
Async::Ready(Some((req, res))) => {
|
||||
match Pin::new(&mut this.connection).poll_accept(cx) {
|
||||
Poll::Ready(None) => return Poll::Ready(Ok(())),
|
||||
Poll::Ready(Some(Err(err))) => return Poll::Ready(Err(err.into())),
|
||||
Poll::Ready(Some(Ok((req, res)))) => {
|
||||
// update keep-alive expire
|
||||
if self.ka_timer.is_some() {
|
||||
if let Some(expire) = self.config.keep_alive_expire() {
|
||||
self.ka_expire = expire;
|
||||
if this.ka_timer.is_some() {
|
||||
if let Some(expire) = this.config.keep_alive_expire() {
|
||||
this.ka_expire = expire;
|
||||
}
|
||||
}
|
||||
|
||||
let (parts, body) = req.into_parts();
|
||||
let mut req = Request::with_payload(body.into());
|
||||
let mut req = Request::with_payload(Payload::<
|
||||
crate::payload::PayloadStream,
|
||||
>::H2(
|
||||
crate::h2::Payload::new(body)
|
||||
));
|
||||
|
||||
let head = &mut req.head_mut();
|
||||
head.uri = parts.uri;
|
||||
head.method = parts.method;
|
||||
head.version = parts.version;
|
||||
head.headers = parts.headers.into();
|
||||
tokio_current_thread::spawn(ServiceResponse::<S::Future, B> {
|
||||
head.peer_addr = this.peer_addr;
|
||||
|
||||
// DEPRECATED
|
||||
// set on_connect data
|
||||
if let Some(ref on_connect) = this.on_connect {
|
||||
on_connect.set(&mut req.extensions_mut());
|
||||
}
|
||||
|
||||
// merge on_connect_ext data into request extensions
|
||||
req.extensions_mut().drain_from(&mut this.on_connect_data);
|
||||
|
||||
actix_rt::spawn(ServiceResponse::<
|
||||
S::Future,
|
||||
S::Response,
|
||||
S::Error,
|
||||
B,
|
||||
> {
|
||||
state: ServiceResponseState::ServiceCall(
|
||||
self.service.call(req),
|
||||
this.service.call(req),
|
||||
Some(res),
|
||||
),
|
||||
config: self.config.clone(),
|
||||
config: this.config.clone(),
|
||||
buffer: None,
|
||||
})
|
||||
_t: PhantomData,
|
||||
});
|
||||
}
|
||||
Async::NotReady => return Ok(Async::NotReady),
|
||||
Poll::Pending => return Poll::Pending,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct ServiceResponse<F, B> {
|
||||
#[pin_project::pin_project]
|
||||
struct ServiceResponse<F, I, E, B> {
|
||||
#[pin]
|
||||
state: ServiceResponseState<F, B>,
|
||||
config: ServiceConfig,
|
||||
buffer: Option<Bytes>,
|
||||
_t: PhantomData<(I, E)>,
|
||||
}
|
||||
|
||||
#[pin_project::pin_project(project = ServiceResponseStateProj)]
|
||||
enum ServiceResponseState<F, B> {
|
||||
ServiceCall(F, Option<SendResponse<Bytes>>),
|
||||
SendPayload(SendStream<Bytes>, ResponseBody<B>),
|
||||
ServiceCall(#[pin] F, Option<SendResponse<Bytes>>),
|
||||
SendPayload(SendStream<Bytes>, #[pin] ResponseBody<B>),
|
||||
}
|
||||
|
||||
impl<F, B> ServiceResponse<F, B>
|
||||
impl<F, I, E, B> ServiceResponse<F, I, E, B>
|
||||
where
|
||||
F: Future,
|
||||
F::Error: Into<Error>,
|
||||
F::Item: Into<Response<B>>,
|
||||
B: MessageBody + 'static,
|
||||
F: Future<Output = Result<I, E>>,
|
||||
E: Into<Error>,
|
||||
I: Into<Response<B>>,
|
||||
B: MessageBody,
|
||||
{
|
||||
fn prepare_response(
|
||||
&self,
|
||||
head: &ResponseHead,
|
||||
length: &mut BodySize,
|
||||
size: &mut BodySize,
|
||||
) -> http::Response<()> {
|
||||
let mut has_date = false;
|
||||
let mut skip_len = length != &BodySize::Stream;
|
||||
let mut skip_len = size != &BodySize::Stream;
|
||||
|
||||
let mut res = http::Response::new(());
|
||||
*res.status_mut() = head.status;
|
||||
@@ -166,14 +202,14 @@ where
|
||||
match head.status {
|
||||
http::StatusCode::NO_CONTENT
|
||||
| http::StatusCode::CONTINUE
|
||||
| http::StatusCode::PROCESSING => *length = BodySize::None,
|
||||
| http::StatusCode::PROCESSING => *size = BodySize::None,
|
||||
http::StatusCode::SWITCHING_PROTOCOLS => {
|
||||
skip_len = true;
|
||||
*length = BodySize::Stream;
|
||||
*size = BodySize::Stream;
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
let _ = match length {
|
||||
let _ = match size {
|
||||
BodySize::None | BodySize::Stream => None,
|
||||
BodySize::Empty => res
|
||||
.headers_mut()
|
||||
@@ -182,10 +218,6 @@ where
|
||||
CONTENT_LENGTH,
|
||||
HeaderValue::try_from(format!("{}", len)).unwrap(),
|
||||
),
|
||||
BodySize::Sized64(len) => res.headers_mut().insert(
|
||||
CONTENT_LENGTH,
|
||||
HeaderValue::try_from(format!("{}", len)).unwrap(),
|
||||
),
|
||||
};
|
||||
|
||||
// copy headers
|
||||
@@ -203,124 +235,137 @@ where
|
||||
if !has_date {
|
||||
let mut bytes = BytesMut::with_capacity(29);
|
||||
self.config.set_date_header(&mut bytes);
|
||||
res.headers_mut()
|
||||
.insert(DATE, HeaderValue::try_from(bytes.freeze()).unwrap());
|
||||
res.headers_mut().insert(
|
||||
DATE,
|
||||
// SAFETY: serialized date-times are known ASCII strings
|
||||
unsafe { HeaderValue::from_maybe_shared_unchecked(bytes.freeze()) },
|
||||
);
|
||||
}
|
||||
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
impl<F, B> Future for ServiceResponse<F, B>
|
||||
impl<F, I, E, B> Future for ServiceResponse<F, I, E, B>
|
||||
where
|
||||
F: Future,
|
||||
F::Error: Into<Error>,
|
||||
F::Item: Into<Response<B>>,
|
||||
B: MessageBody + 'static,
|
||||
F: Future<Output = Result<I, E>>,
|
||||
E: Into<Error>,
|
||||
I: Into<Response<B>>,
|
||||
B: MessageBody,
|
||||
{
|
||||
type Item = ();
|
||||
type Error = ();
|
||||
type Output = ();
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
match self.state {
|
||||
ServiceResponseState::ServiceCall(ref mut call, ref mut send) => {
|
||||
match call.poll() {
|
||||
Ok(Async::Ready(res)) => {
|
||||
let (res, body) = res.into().replace_body(());
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let mut this = self.as_mut().project();
|
||||
|
||||
let mut send = send.take().unwrap();
|
||||
let mut length = body.length();
|
||||
let h2_res = self.prepare_response(res.head(), &mut length);
|
||||
match this.state.project() {
|
||||
ServiceResponseStateProj::ServiceCall(call, send) => match call.poll(cx) {
|
||||
Poll::Ready(Ok(res)) => {
|
||||
let (res, body) = res.into().replace_body(());
|
||||
|
||||
let stream = send
|
||||
.send_response(h2_res, length.is_eof())
|
||||
.map_err(|e| {
|
||||
trace!("Error sending h2 response: {:?}", e);
|
||||
})?;
|
||||
let mut send = send.take().unwrap();
|
||||
let mut size = body.size();
|
||||
let h2_res = self.as_mut().prepare_response(res.head(), &mut size);
|
||||
this = self.as_mut().project();
|
||||
|
||||
if length.is_eof() {
|
||||
Ok(Async::Ready(()))
|
||||
} else {
|
||||
self.state = ServiceResponseState::SendPayload(stream, body);
|
||||
self.poll()
|
||||
let stream = match send.send_response(h2_res, size.is_eof()) {
|
||||
Err(e) => {
|
||||
trace!("Error sending h2 response: {:?}", e);
|
||||
return Poll::Ready(());
|
||||
}
|
||||
Ok(stream) => stream,
|
||||
};
|
||||
|
||||
if size.is_eof() {
|
||||
Poll::Ready(())
|
||||
} else {
|
||||
this.state
|
||||
.set(ServiceResponseState::SendPayload(stream, body));
|
||||
self.poll(cx)
|
||||
}
|
||||
Ok(Async::NotReady) => Ok(Async::NotReady),
|
||||
Err(_e) => {
|
||||
let res: Response = Response::InternalServerError().finish();
|
||||
let (res, body) = res.replace_body(());
|
||||
}
|
||||
Poll::Pending => Poll::Pending,
|
||||
Poll::Ready(Err(e)) => {
|
||||
let res: Response = e.into().into();
|
||||
let (res, body) = res.replace_body(());
|
||||
|
||||
let mut send = send.take().unwrap();
|
||||
let mut length = body.length();
|
||||
let h2_res = self.prepare_response(res.head(), &mut length);
|
||||
let mut send = send.take().unwrap();
|
||||
let mut size = body.size();
|
||||
let h2_res = self.as_mut().prepare_response(res.head(), &mut size);
|
||||
this = self.as_mut().project();
|
||||
|
||||
let stream = send
|
||||
.send_response(h2_res, length.is_eof())
|
||||
.map_err(|e| {
|
||||
trace!("Error sending h2 response: {:?}", e);
|
||||
})?;
|
||||
let stream = match send.send_response(h2_res, size.is_eof()) {
|
||||
Err(e) => {
|
||||
trace!("Error sending h2 response: {:?}", e);
|
||||
return Poll::Ready(());
|
||||
}
|
||||
Ok(stream) => stream,
|
||||
};
|
||||
|
||||
if length.is_eof() {
|
||||
Ok(Async::Ready(()))
|
||||
if size.is_eof() {
|
||||
Poll::Ready(())
|
||||
} else {
|
||||
this.state.set(ServiceResponseState::SendPayload(
|
||||
stream,
|
||||
body.into_body(),
|
||||
));
|
||||
self.poll(cx)
|
||||
}
|
||||
}
|
||||
},
|
||||
ServiceResponseStateProj::SendPayload(ref mut stream, ref mut body) => {
|
||||
loop {
|
||||
loop {
|
||||
if let Some(ref mut buffer) = this.buffer {
|
||||
match stream.poll_capacity(cx) {
|
||||
Poll::Pending => return Poll::Pending,
|
||||
Poll::Ready(None) => return Poll::Ready(()),
|
||||
Poll::Ready(Some(Ok(cap))) => {
|
||||
let len = buffer.len();
|
||||
let bytes = buffer.split_to(std::cmp::min(cap, len));
|
||||
|
||||
if let Err(e) = stream.send_data(bytes, false) {
|
||||
warn!("{:?}", e);
|
||||
return Poll::Ready(());
|
||||
} else if !buffer.is_empty() {
|
||||
let cap =
|
||||
std::cmp::min(buffer.len(), CHUNK_SIZE);
|
||||
stream.reserve_capacity(cap);
|
||||
} else {
|
||||
this.buffer.take();
|
||||
}
|
||||
}
|
||||
Poll::Ready(Some(Err(e))) => {
|
||||
warn!("{:?}", e);
|
||||
return Poll::Ready(());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
self.state = ServiceResponseState::SendPayload(
|
||||
stream,
|
||||
body.into_body(),
|
||||
);
|
||||
self.poll()
|
||||
match body.as_mut().poll_next(cx) {
|
||||
Poll::Pending => return Poll::Pending,
|
||||
Poll::Ready(None) => {
|
||||
if let Err(e) = stream.send_data(Bytes::new(), true)
|
||||
{
|
||||
warn!("{:?}", e);
|
||||
}
|
||||
return Poll::Ready(());
|
||||
}
|
||||
Poll::Ready(Some(Ok(chunk))) => {
|
||||
stream.reserve_capacity(std::cmp::min(
|
||||
chunk.len(),
|
||||
CHUNK_SIZE,
|
||||
));
|
||||
*this.buffer = Some(chunk);
|
||||
}
|
||||
Poll::Ready(Some(Err(e))) => {
|
||||
error!("Response payload stream error: {:?}", e);
|
||||
return Poll::Ready(());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
ServiceResponseState::SendPayload(ref mut stream, ref mut body) => loop {
|
||||
loop {
|
||||
if let Some(ref mut buffer) = self.buffer {
|
||||
match stream.poll_capacity().map_err(|e| warn!("{:?}", e))? {
|
||||
Async::NotReady => return Ok(Async::NotReady),
|
||||
Async::Ready(None) => return Ok(Async::Ready(())),
|
||||
Async::Ready(Some(cap)) => {
|
||||
let len = buffer.len();
|
||||
let bytes = buffer.split_to(std::cmp::min(cap, len));
|
||||
|
||||
if let Err(e) = stream.send_data(bytes, false) {
|
||||
warn!("{:?}", e);
|
||||
return Err(());
|
||||
} else if !buffer.is_empty() {
|
||||
let cap = std::cmp::min(buffer.len(), CHUNK_SIZE);
|
||||
stream.reserve_capacity(cap);
|
||||
} else {
|
||||
self.buffer.take();
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
match body.poll_next() {
|
||||
Ok(Async::NotReady) => {
|
||||
return Ok(Async::NotReady);
|
||||
}
|
||||
Ok(Async::Ready(None)) => {
|
||||
if let Err(e) = stream.send_data(Bytes::new(), true) {
|
||||
warn!("{:?}", e);
|
||||
return Err(());
|
||||
} else {
|
||||
return Ok(Async::Ready(()));
|
||||
}
|
||||
}
|
||||
Ok(Async::Ready(Some(chunk))) => {
|
||||
stream.reserve_capacity(std::cmp::min(
|
||||
chunk.len(),
|
||||
CHUNK_SIZE,
|
||||
));
|
||||
self.buffer = Some(chunk);
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Response payload stream error: {:?}", e);
|
||||
return Err(());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
#![allow(dead_code, unused_imports)]
|
||||
|
||||
use std::fmt;
|
||||
//! HTTP/2 implementation
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use bytes::Bytes;
|
||||
use futures::{Async, Poll, Stream};
|
||||
use futures_core::Stream;
|
||||
use h2::RecvStream;
|
||||
|
||||
mod dispatcher;
|
||||
@@ -25,22 +25,26 @@ impl Payload {
|
||||
}
|
||||
|
||||
impl Stream for Payload {
|
||||
type Item = Bytes;
|
||||
type Error = PayloadError;
|
||||
type Item = Result<Bytes, PayloadError>;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
match self.pl.poll() {
|
||||
Ok(Async::Ready(Some(chunk))) => {
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Self::Item>> {
|
||||
let this = self.get_mut();
|
||||
|
||||
match Pin::new(&mut this.pl).poll_data(cx) {
|
||||
Poll::Ready(Some(Ok(chunk))) => {
|
||||
let len = chunk.len();
|
||||
if let Err(err) = self.pl.release_capacity().release_capacity(len) {
|
||||
Err(err.into())
|
||||
if let Err(err) = this.pl.flow_control().release_capacity(len) {
|
||||
Poll::Ready(Some(Err(err.into())))
|
||||
} else {
|
||||
Ok(Async::Ready(Some(chunk)))
|
||||
Poll::Ready(Some(Ok(chunk)))
|
||||
}
|
||||
}
|
||||
Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
|
||||
Ok(Async::NotReady) => Ok(Async::NotReady),
|
||||
Err(err) => Err(err.into()),
|
||||
Poll::Ready(Some(Err(err))) => Poll::Ready(Some(Err(err.into()))),
|
||||
Poll::Pending => Poll::Pending,
|
||||
Poll::Ready(None) => Poll::Ready(None),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,195 +1,361 @@
|
||||
use std::fmt::Debug;
|
||||
use std::future::Future;
|
||||
use std::marker::PhantomData;
|
||||
use std::{io, net};
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use std::{net, rc::Rc};
|
||||
|
||||
use actix_codec::{AsyncRead, AsyncWrite, Framed};
|
||||
use actix_server_config::{Io, ServerConfig as SrvConfig};
|
||||
use actix_service::{IntoNewService, NewService, Service};
|
||||
use actix_utils::cloneable::CloneableService;
|
||||
use actix_codec::{AsyncRead, AsyncWrite};
|
||||
use actix_rt::net::TcpStream;
|
||||
use actix_service::{
|
||||
fn_factory, fn_service, pipeline_factory, IntoServiceFactory, Service,
|
||||
ServiceFactory,
|
||||
};
|
||||
use bytes::Bytes;
|
||||
use futures::future::{ok, FutureResult};
|
||||
use futures::{try_ready, Async, Future, IntoFuture, Poll, Stream};
|
||||
use h2::server::{self, Connection, Handshake};
|
||||
use h2::RecvStream;
|
||||
use futures_core::ready;
|
||||
use futures_util::future::ok;
|
||||
use h2::server::{self, Handshake};
|
||||
use log::error;
|
||||
|
||||
use crate::body::MessageBody;
|
||||
use crate::config::{KeepAlive, ServiceConfig};
|
||||
use crate::error::{DispatchError, Error, ParseError, ResponseError};
|
||||
use crate::payload::Payload;
|
||||
use crate::cloneable::CloneableService;
|
||||
use crate::config::ServiceConfig;
|
||||
use crate::error::{DispatchError, Error};
|
||||
use crate::helpers::DataFactory;
|
||||
use crate::request::Request;
|
||||
use crate::response::Response;
|
||||
use crate::{ConnectCallback, Extensions};
|
||||
|
||||
use super::dispatcher::Dispatcher;
|
||||
|
||||
/// `NewService` implementation for HTTP2 transport
|
||||
pub struct H2Service<T, P, S, B> {
|
||||
/// `ServiceFactory` implementation for HTTP2 transport
|
||||
pub struct H2Service<T, S, B> {
|
||||
srv: S,
|
||||
cfg: ServiceConfig,
|
||||
_t: PhantomData<(T, P, B)>,
|
||||
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
|
||||
on_connect_ext: Option<Rc<ConnectCallback<T>>>,
|
||||
_t: PhantomData<(T, B)>,
|
||||
}
|
||||
|
||||
impl<T, P, S, B> H2Service<T, P, S, B>
|
||||
impl<T, S, B> H2Service<T, S, B>
|
||||
where
|
||||
S: NewService<SrvConfig, Request = Request>,
|
||||
S::Error: Into<Error>,
|
||||
S::Response: Into<Response<B>>,
|
||||
S: ServiceFactory<Config = (), Request = Request>,
|
||||
S::Error: Into<Error> + 'static,
|
||||
S::Response: Into<Response<B>> + 'static,
|
||||
<S::Service as Service>::Future: 'static,
|
||||
B: MessageBody + 'static,
|
||||
{
|
||||
/// Create new `HttpService` instance.
|
||||
pub fn new<F: IntoNewService<S, SrvConfig>>(service: F) -> Self {
|
||||
let cfg = ServiceConfig::new(KeepAlive::Timeout(5), 5000, 0);
|
||||
|
||||
H2Service {
|
||||
cfg,
|
||||
srv: service.into_new_service(),
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create new `HttpService` instance with config.
|
||||
pub fn with_config<F: IntoNewService<S, SrvConfig>>(
|
||||
pub(crate) fn with_config<F: IntoServiceFactory<S>>(
|
||||
cfg: ServiceConfig,
|
||||
service: F,
|
||||
) -> Self {
|
||||
H2Service {
|
||||
cfg,
|
||||
srv: service.into_new_service(),
|
||||
on_connect: None,
|
||||
on_connect_ext: None,
|
||||
srv: service.into_factory(),
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set on connect callback.
|
||||
|
||||
pub(crate) fn on_connect(
|
||||
mut self,
|
||||
f: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
|
||||
) -> Self {
|
||||
self.on_connect = f;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set on connect callback.
|
||||
pub(crate) fn on_connect_ext(mut self, f: Option<Rc<ConnectCallback<T>>>) -> Self {
|
||||
self.on_connect_ext = f;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, B> H2Service<TcpStream, S, B>
|
||||
where
|
||||
S: ServiceFactory<Config = (), Request = Request>,
|
||||
S::Error: Into<Error> + 'static,
|
||||
S::Response: Into<Response<B>> + 'static,
|
||||
<S::Service as Service>::Future: 'static,
|
||||
B: MessageBody + 'static,
|
||||
{
|
||||
/// Create simple tcp based service
|
||||
pub fn tcp(
|
||||
self,
|
||||
) -> impl ServiceFactory<
|
||||
Config = (),
|
||||
Request = TcpStream,
|
||||
Response = (),
|
||||
Error = DispatchError,
|
||||
InitError = S::InitError,
|
||||
> {
|
||||
pipeline_factory(fn_factory(|| async {
|
||||
Ok::<_, S::InitError>(fn_service(|io: TcpStream| {
|
||||
let peer_addr = io.peer_addr().ok();
|
||||
ok::<_, DispatchError>((io, peer_addr))
|
||||
}))
|
||||
}))
|
||||
.and_then(self)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "openssl")]
|
||||
mod openssl {
|
||||
use actix_service::{fn_factory, fn_service};
|
||||
use actix_tls::openssl::{Acceptor, SslAcceptor, SslStream};
|
||||
use actix_tls::{openssl::HandshakeError, TlsError};
|
||||
|
||||
use super::*;
|
||||
|
||||
impl<S, B> H2Service<SslStream<TcpStream>, S, B>
|
||||
where
|
||||
S: ServiceFactory<Config = (), Request = Request>,
|
||||
S::Error: Into<Error> + 'static,
|
||||
S::Response: Into<Response<B>> + 'static,
|
||||
<S::Service as Service>::Future: 'static,
|
||||
B: MessageBody + 'static,
|
||||
{
|
||||
/// Create ssl based service
|
||||
pub fn openssl(
|
||||
self,
|
||||
acceptor: SslAcceptor,
|
||||
) -> impl ServiceFactory<
|
||||
Config = (),
|
||||
Request = TcpStream,
|
||||
Response = (),
|
||||
Error = TlsError<HandshakeError<TcpStream>, DispatchError>,
|
||||
InitError = S::InitError,
|
||||
> {
|
||||
pipeline_factory(
|
||||
Acceptor::new(acceptor)
|
||||
.map_err(TlsError::Tls)
|
||||
.map_init_err(|_| panic!()),
|
||||
)
|
||||
.and_then(fn_factory(|| {
|
||||
ok::<_, S::InitError>(fn_service(|io: SslStream<TcpStream>| {
|
||||
let peer_addr = io.get_ref().peer_addr().ok();
|
||||
ok((io, peer_addr))
|
||||
}))
|
||||
}))
|
||||
.and_then(self.map_err(TlsError::Service))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, P, S, B> NewService<SrvConfig> for H2Service<T, P, S, B>
|
||||
#[cfg(feature = "rustls")]
|
||||
mod rustls {
|
||||
use super::*;
|
||||
use actix_tls::rustls::{Acceptor, ServerConfig, TlsStream};
|
||||
use actix_tls::TlsError;
|
||||
use std::io;
|
||||
|
||||
impl<S, B> H2Service<TlsStream<TcpStream>, S, B>
|
||||
where
|
||||
S: ServiceFactory<Config = (), Request = Request>,
|
||||
S::Error: Into<Error> + 'static,
|
||||
S::Response: Into<Response<B>> + 'static,
|
||||
<S::Service as Service>::Future: 'static,
|
||||
B: MessageBody + 'static,
|
||||
{
|
||||
/// Create openssl based service
|
||||
pub fn rustls(
|
||||
self,
|
||||
mut config: ServerConfig,
|
||||
) -> impl ServiceFactory<
|
||||
Config = (),
|
||||
Request = TcpStream,
|
||||
Response = (),
|
||||
Error = TlsError<io::Error, DispatchError>,
|
||||
InitError = S::InitError,
|
||||
> {
|
||||
let protos = vec!["h2".to_string().into()];
|
||||
config.set_protocols(&protos);
|
||||
|
||||
pipeline_factory(
|
||||
Acceptor::new(config)
|
||||
.map_err(TlsError::Tls)
|
||||
.map_init_err(|_| panic!()),
|
||||
)
|
||||
.and_then(fn_factory(|| {
|
||||
ok::<_, S::InitError>(fn_service(|io: TlsStream<TcpStream>| {
|
||||
let peer_addr = io.get_ref().0.peer_addr().ok();
|
||||
ok((io, peer_addr))
|
||||
}))
|
||||
}))
|
||||
.and_then(self.map_err(TlsError::Service))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S, B> ServiceFactory for H2Service<T, S, B>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite,
|
||||
S: NewService<SrvConfig, Request = Request>,
|
||||
S::Error: Into<Error>,
|
||||
S::Response: Into<Response<B>>,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
S: ServiceFactory<Config = (), Request = Request>,
|
||||
S::Error: Into<Error> + 'static,
|
||||
S::Response: Into<Response<B>> + 'static,
|
||||
<S::Service as Service>::Future: 'static,
|
||||
B: MessageBody + 'static,
|
||||
{
|
||||
type Request = Io<T, P>;
|
||||
type Config = ();
|
||||
type Request = (T, Option<net::SocketAddr>);
|
||||
type Response = ();
|
||||
type Error = DispatchError;
|
||||
type InitError = S::InitError;
|
||||
type Service = H2ServiceHandler<T, P, S::Service, B>;
|
||||
type Future = H2ServiceResponse<T, P, S, B>;
|
||||
type Service = H2ServiceHandler<T, S::Service, B>;
|
||||
type Future = H2ServiceResponse<T, S, B>;
|
||||
|
||||
fn new_service(&self, cfg: &SrvConfig) -> Self::Future {
|
||||
fn new_service(&self, _: ()) -> Self::Future {
|
||||
H2ServiceResponse {
|
||||
fut: self.srv.new_service(cfg).into_future(),
|
||||
fut: self.srv.new_service(()),
|
||||
cfg: Some(self.cfg.clone()),
|
||||
on_connect: self.on_connect.clone(),
|
||||
on_connect_ext: self.on_connect_ext.clone(),
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
pub struct H2ServiceResponse<T, P, S: NewService<SrvConfig, Request = Request>, B> {
|
||||
fut: <S::Future as IntoFuture>::Future,
|
||||
#[pin_project::pin_project]
|
||||
pub struct H2ServiceResponse<T, S: ServiceFactory, B> {
|
||||
#[pin]
|
||||
fut: S::Future,
|
||||
cfg: Option<ServiceConfig>,
|
||||
_t: PhantomData<(T, P, B)>,
|
||||
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
|
||||
on_connect_ext: Option<Rc<ConnectCallback<T>>>,
|
||||
_t: PhantomData<(T, B)>,
|
||||
}
|
||||
|
||||
impl<T, P, S, B> Future for H2ServiceResponse<T, P, S, B>
|
||||
impl<T, S, B> Future for H2ServiceResponse<T, S, B>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite,
|
||||
S: NewService<SrvConfig, Request = Request>,
|
||||
S::Error: Into<Error>,
|
||||
S::Response: Into<Response<B>>,
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
S: ServiceFactory<Config = (), Request = Request>,
|
||||
S::Error: Into<Error> + 'static,
|
||||
S::Response: Into<Response<B>> + 'static,
|
||||
<S::Service as Service>::Future: 'static,
|
||||
B: MessageBody + 'static,
|
||||
{
|
||||
type Item = H2ServiceHandler<T, P, S::Service, B>;
|
||||
type Error = S::InitError;
|
||||
type Output = Result<H2ServiceHandler<T, S::Service, B>, S::InitError>;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
let service = try_ready!(self.fut.poll());
|
||||
Ok(Async::Ready(H2ServiceHandler::new(
|
||||
self.cfg.take().unwrap(),
|
||||
service,
|
||||
)))
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let this = self.as_mut().project();
|
||||
|
||||
Poll::Ready(ready!(this.fut.poll(cx)).map(|service| {
|
||||
let this = self.as_mut().project();
|
||||
H2ServiceHandler::new(
|
||||
this.cfg.take().unwrap(),
|
||||
this.on_connect.clone(),
|
||||
this.on_connect_ext.clone(),
|
||||
service,
|
||||
)
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
/// `Service` implementation for http/2 transport
pub struct H2ServiceHandler<T, P, S, B> {
pub struct H2ServiceHandler<T, S: Service, B> {
    srv: CloneableService<S>,
    cfg: ServiceConfig,
    _t: PhantomData<(T, P, B)>,
    on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
    on_connect_ext: Option<Rc<ConnectCallback<T>>>,
    _t: PhantomData<(T, B)>,
}

impl<T, P, S, B> H2ServiceHandler<T, P, S, B>
impl<T, S, B> H2ServiceHandler<T, S, B>
where
    S: Service<Request = Request>,
    S::Error: Into<Error>,
    S::Error: Into<Error> + 'static,
    S::Future: 'static,
    S::Response: Into<Response<B>>,
    S::Response: Into<Response<B>> + 'static,
    B: MessageBody + 'static,
{
    fn new(cfg: ServiceConfig, srv: S) -> H2ServiceHandler<T, P, S, B> {
    fn new(
        cfg: ServiceConfig,
        on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
        on_connect_ext: Option<Rc<ConnectCallback<T>>>,
        srv: S,
    ) -> H2ServiceHandler<T, S, B> {
        H2ServiceHandler {
            cfg,
            on_connect,
            on_connect_ext,
            srv: CloneableService::new(srv),
            _t: PhantomData,
        }
    }
}

impl<T, P, S, B> Service for H2ServiceHandler<T, P, S, B>
impl<T, S, B> Service for H2ServiceHandler<T, S, B>
where
    T: AsyncRead + AsyncWrite,
    T: AsyncRead + AsyncWrite + Unpin,
    S: Service<Request = Request>,
    S::Error: Into<Error>,
    S::Error: Into<Error> + 'static,
    S::Future: 'static,
    S::Response: Into<Response<B>>,
    S::Response: Into<Response<B>> + 'static,
    B: MessageBody + 'static,
{
    type Request = Io<T, P>;
    type Request = (T, Option<net::SocketAddr>);
    type Response = ();
    type Error = DispatchError;
    type Future = H2ServiceHandlerResponse<T, S, B>;

    fn poll_ready(&mut self) -> Poll<(), Self::Error> {
        self.srv.poll_ready().map_err(|e| {
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.srv.poll_ready(cx).map_err(|e| {
            let e = e.into();
            error!("Service readiness error: {:?}", e);
            DispatchError::Service(e)
        })
    }

    fn call(&mut self, req: Self::Request) -> Self::Future {
    fn call(&mut self, (io, addr): Self::Request) -> Self::Future {
        let deprecated_on_connect = self.on_connect.as_ref().map(|handler| handler(&io));

        let mut connect_extensions = Extensions::new();
        if let Some(ref handler) = self.on_connect_ext {
            // run on_connect_ext callback, populating connect extensions
            handler(&io, &mut connect_extensions);
        }

        H2ServiceHandlerResponse {
            state: State::Handshake(
                Some(self.srv.clone()),
                Some(self.cfg.clone()),
                server::handshake(req.into_parts().0),
                addr,
                deprecated_on_connect,
                Some(connect_extensions),
                server::handshake(io),
            ),
        }
    }
}

enum State<T: AsyncRead + AsyncWrite, S: Service<Request = Request>, B: MessageBody>
enum State<T, S: Service<Request = Request>, B: MessageBody>
where
    T: AsyncRead + AsyncWrite + Unpin,
    S::Future: 'static,
{
    Incoming(Dispatcher<T, S, B>),
    Handshake(
        Option<CloneableService<S>>,
        Option<ServiceConfig>,
        Option<net::SocketAddr>,
        Option<Box<dyn DataFactory>>,
        Option<Extensions>,
        Handshake<T, Bytes>,
    ),
}

pub struct H2ServiceHandlerResponse<T, S, B>
where
    T: AsyncRead + AsyncWrite,
    T: AsyncRead + AsyncWrite + Unpin,
    S: Service<Request = Request>,
    S::Error: Into<Error>,
    S::Error: Into<Error> + 'static,
    S::Future: 'static,
    S::Response: Into<Response<B>>,
    S::Response: Into<Response<B>> + 'static,
    B: MessageBody + 'static,
{
    state: State<T, S, B>,

@@ -197,37 +363,44 @@ where

impl<T, S, B> Future for H2ServiceHandlerResponse<T, S, B>
where
    T: AsyncRead + AsyncWrite,
    T: AsyncRead + AsyncWrite + Unpin,
    S: Service<Request = Request>,
    S::Error: Into<Error>,
    S::Error: Into<Error> + 'static,
    S::Future: 'static,
    S::Response: Into<Response<B>>,
    S::Response: Into<Response<B>> + 'static,
    B: MessageBody,
{
    type Item = ();
    type Error = DispatchError;
    type Output = Result<(), DispatchError>;

    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        match self.state {
            State::Incoming(ref mut disp) => disp.poll(),
            State::Handshake(ref mut srv, ref mut config, ref mut handshake) => {
                match handshake.poll() {
                    Ok(Async::Ready(conn)) => {
                        self.state = State::Incoming(Dispatcher::new(
                            srv.take().unwrap(),
                            conn,
                            config.take().unwrap(),
                            None,
                        ));
                        self.poll()
                    }
                    Ok(Async::NotReady) => Ok(Async::NotReady),
                    Err(err) => {
                        trace!("H2 handshake error: {}", err);
                        Err(err.into())
                    }
            State::Incoming(ref mut disp) => Pin::new(disp).poll(cx),
            State::Handshake(
                ref mut srv,
                ref mut config,
                ref peer_addr,
                ref mut on_connect,
                ref mut on_connect_data,
                ref mut handshake,
            ) => match Pin::new(handshake).poll(cx) {
                Poll::Ready(Ok(conn)) => {
                    self.state = State::Incoming(Dispatcher::new(
                        srv.take().unwrap(),
                        conn,
                        on_connect.take(),
                        on_connect_data.take().unwrap(),
                        config.take().unwrap(),
                        None,
                        *peer_addr,
                    ));
                    self.poll(cx)
                }
                }
                Poll::Ready(Err(err)) => {
                    trace!("H2 handshake error: {}", err);
                    Poll::Ready(Err(err.into()))
                }
                Poll::Pending => Poll::Pending,
            },
        }
    }
}

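The hunks above migrate the HTTP/2 service handler from futures 0.1 (`Async::Ready` / `Async::NotReady`) to `std::future` (`Pin`, `Context`, `Poll`). A minimal sketch of the same handshake-then-dispatch state machine, using hypothetical `Connection`/`State` types rather than actix-http's real ones:

```rust
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

// Hypothetical stand-ins; the real handler stores a CloneableService, ServiceConfig,
// peer address, connect extensions, and an h2 server handshake future instead.
enum State<F: Future + Unpin> {
    Handshake(F),
    Running,
}

struct Connection<F: Future + Unpin> {
    state: State<F>,
}

impl<F: Future + Unpin> Future for Connection<F> {
    type Output = ();

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        match self.state {
            // poll the inner future through Pin::new, as the handler above does
            State::Handshake(ref mut fut) => match Pin::new(fut).poll(cx) {
                Poll::Ready(_) => {
                    // handshake finished: swap states and poll again immediately,
                    // mirroring the `self.poll(cx)` call in the dispatcher above
                    self.state = State::Running;
                    self.poll(cx)
                }
                Poll::Pending => Poll::Pending,
            },
            State::Running => Poll::Ready(()),
        }
    }
}
```

The `Unpin` bounds added throughout the diff are what make the `Pin::new(...)` projections in this style of `poll` legal.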
@@ -1,3 +1,5 @@
use std::cmp::Ordering;

use mime::Mime;

use crate::header::{qitem, QualityItem};

@@ -7,7 +9,7 @@ header! {
    /// `Accept` header, defined in [RFC7231](http://tools.ietf.org/html/rfc7231#section-5.3.2)
    ///
    /// The `Accept` header field can be used by user agents to specify
    /// response media types that are acceptable. Accept header fields can
    /// response media types that are acceptable. Accept header fields can
    /// be used to indicate that the request is specifically limited to a
    /// small set of desired types, as in the case of a request for an
    /// in-line image

@@ -97,14 +99,14 @@ header! {
        test_header!(
            test1,
            vec![b"audio/*; q=0.2, audio/basic"],
            Some(HeaderField(vec![
            Some(Accept(vec![
                QualityItem::new("audio/*".parse().unwrap(), q(200)),
                qitem("audio/basic".parse().unwrap()),
            ])));
        test_header!(
            test2,
            vec![b"text/plain; q=0.5, text/html, text/x-dvi; q=0.8, text/x-c"],
            Some(HeaderField(vec![
            Some(Accept(vec![
                QualityItem::new(mime::TEXT_PLAIN, q(500)),
                qitem(mime::TEXT_HTML),
                QualityItem::new(

@@ -138,23 +140,148 @@ header! {
}

impl Accept {
    /// A constructor to easily create `Accept: */*`.
    /// Construct `Accept: */*`.
    pub fn star() -> Accept {
        Accept(vec![qitem(mime::STAR_STAR)])
    }

    /// A constructor to easily create `Accept: application/json`.
    /// Construct `Accept: application/json`.
    pub fn json() -> Accept {
        Accept(vec![qitem(mime::APPLICATION_JSON)])
    }

    /// A constructor to easily create `Accept: text/*`.
    /// Construct `Accept: text/*`.
    pub fn text() -> Accept {
        Accept(vec![qitem(mime::TEXT_STAR)])
    }

    /// A constructor to easily create `Accept: image/*`.
    /// Construct `Accept: image/*`.
    pub fn image() -> Accept {
        Accept(vec![qitem(mime::IMAGE_STAR)])
    }

    /// Construct `Accept: text/html`.
    pub fn html() -> Accept {
        Accept(vec![qitem(mime::TEXT_HTML)])
    }

    /// Returns a sorted list of mime types from highest to lowest preference, accounting for
    /// [q-factor weighting] and specificity.
    ///
    /// [q-factor weighting]: https://tools.ietf.org/html/rfc7231#section-5.3.2
    pub fn mime_precedence(&self) -> Vec<Mime> {
        let mut types = self.0.clone();

        // use stable sort so items with equal q-factor and specificity retain listed order
        types.sort_by(|a, b| {
            // sort by q-factor descending
            b.quality.cmp(&a.quality).then_with(|| {
                // use specificity rules on mime types with
                // same q-factor (eg. text/html > text/* > */*)

                // subtypes are not comparable if main type is star, so return
                match (a.item.type_(), b.item.type_()) {
                    (mime::STAR, mime::STAR) => return Ordering::Equal,

                    // a is sorted after b
                    (mime::STAR, _) => return Ordering::Greater,

                    // a is sorted before b
                    (_, mime::STAR) => return Ordering::Less,

                    _ => {}
                }

                // in both these match expressions, the returned ordering appears
                // inverted because sort is high-to-low ("descending") precedence
                match (a.item.subtype(), b.item.subtype()) {
                    (mime::STAR, mime::STAR) => Ordering::Equal,

                    // a is sorted after b
                    (mime::STAR, _) => Ordering::Greater,

                    // a is sorted before b
                    (_, mime::STAR) => Ordering::Less,

                    _ => Ordering::Equal,
                }
            })
        });

        types.into_iter().map(|qitem| qitem.item).collect()
    }

    /// Extracts the most preferable mime type, accounting for [q-factor weighting].
    ///
    /// If no q-factors are provided, the first mime type is chosen. Note that items without
    /// q-factors are given the maximum preference value.
    ///
    /// Returns `None` if contained list is empty.
    ///
    /// [q-factor weighting]: https://tools.ietf.org/html/rfc7231#section-5.3.2
    pub fn mime_preference(&self) -> Option<Mime> {
        let types = self.mime_precedence();
        types.first().cloned()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::header::q;

    #[test]
    fn test_mime_precedence() {
        let test = Accept(vec![]);
        assert!(test.mime_precedence().is_empty());

        let test = Accept(vec![qitem(mime::APPLICATION_JSON)]);
        assert_eq!(test.mime_precedence(), vec!(mime::APPLICATION_JSON));

        let test = Accept(vec![
            qitem(mime::TEXT_HTML),
            "application/xhtml+xml".parse().unwrap(),
            QualityItem::new("application/xml".parse().unwrap(), q(0.9)),
            QualityItem::new(mime::STAR_STAR, q(0.8)),
        ]);
        assert_eq!(
            test.mime_precedence(),
            vec![
                mime::TEXT_HTML,
                "application/xhtml+xml".parse().unwrap(),
                "application/xml".parse().unwrap(),
                mime::STAR_STAR,
            ]
        );

        let test = Accept(vec![
            qitem(mime::STAR_STAR),
            qitem(mime::IMAGE_STAR),
            qitem(mime::IMAGE_PNG),
        ]);
        assert_eq!(
            test.mime_precedence(),
            vec![mime::IMAGE_PNG, mime::IMAGE_STAR, mime::STAR_STAR]
        );
    }

    #[test]
    fn test_mime_preference() {
        let test = Accept(vec![
            qitem(mime::TEXT_HTML),
            "application/xhtml+xml".parse().unwrap(),
            QualityItem::new("application/xml".parse().unwrap(), q(0.9)),
            QualityItem::new(mime::STAR_STAR, q(0.8)),
        ]);
        assert_eq!(test.mime_preference(), Some(mime::TEXT_HTML));

        let test = Accept(vec![
            QualityItem::new("video/*".parse().unwrap(), q(0.8)),
            qitem(mime::IMAGE_PNG),
            QualityItem::new(mime::STAR_STAR, q(0.5)),
            qitem(mime::IMAGE_SVG),
            QualityItem::new(mime::IMAGE_STAR, q(0.8)),
        ]);
        assert_eq!(test.mime_preference(), Some(mime::IMAGE_PNG));
    }
}

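The new `mime_precedence` and `mime_preference` helpers make simple server-driven content negotiation easy to express. A hedged sketch, assuming `Accept`, `QualityItem`, `q`, and `qitem` are re-exported from the public `header` module (as they are inside the crate's own tests above) and that the `mime` crate is a dependency:

```rust
// Assumed import path; not taken from this diff.
use actix_web::http::header::{q, qitem, Accept, QualityItem};

fn negotiate(accept: &Accept) -> &'static str {
    match accept.mime_preference() {
        Some(ref m) if *m == mime::APPLICATION_JSON => "json",
        Some(ref m) if *m == mime::TEXT_HTML => "html",
        _ => "plain text fallback",
    }
}

fn main() {
    // client prefers HTML, then XML at q=0.9, then anything at q=0.8
    let accept = Accept(vec![
        qitem(mime::TEXT_HTML),
        QualityItem::new("application/xml".parse().unwrap(), q(0.9)),
        QualityItem::new(mime::STAR_STAR, q(0.8)),
    ]);

    assert_eq!(accept.mime_precedence().first(), Some(&mime::TEXT_HTML));
    assert_eq!(negotiate(&accept), "html");
}
```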
@@ -63,7 +63,7 @@ header! {
    (AcceptCharset, ACCEPT_CHARSET) => (QualityItem<Charset>)+

    test_accept_charset {
        /// Test case from RFC
        // Test case from RFC
        test_header!(test1, vec![b"iso-8859-5, unicode-1-1;q=0.8"]);
    }
}

@@ -1,5 +1,5 @@
use http::Method;
use http::header;
use http::Method;

header! {
    /// `Allow` header, defined in [RFC7231](http://tools.ietf.org/html/rfc7231#section-7.4.1)

@@ -74,18 +74,18 @@ impl Header for CacheControl {
}

impl fmt::Display for CacheControl {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt_comma_delimited(f, &self[..])
    }
}

impl IntoHeaderValue for CacheControl {
    type Error = header::InvalidHeaderValueBytes;
    type Error = header::InvalidHeaderValue;

    fn try_into(self) -> Result<header::HeaderValue, Self::Error> {
        let mut writer = Writer::new();
        let _ = write!(&mut writer, "{}", self);
        header::HeaderValue::from_shared(writer.take())
        header::HeaderValue::from_maybe_shared(writer.take())
    }
}

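`HeaderValue::from_shared` and `InvalidHeaderValueBytes` disappear with the move to `http` 0.2; `from_maybe_shared` and `InvalidHeaderValue` are their replacements. A rough sketch of the same Display-then-convert pattern these `IntoHeaderValue` impls use, with a plain `String` standing in for the crate's `Writer`:

```rust
use bytes::Bytes;
use http::header::{HeaderValue, InvalidHeaderValue};
use std::fmt::{Display, Write as _};

// Format any Display value into an owned buffer, then validate it as a header value.
// `from_maybe_shared` avoids a copy when it is handed a `Bytes` buffer.
fn display_to_header_value<T: Display>(value: &T) -> Result<HeaderValue, InvalidHeaderValue> {
    let mut buf = String::new();
    let _ = write!(buf, "{}", value);
    HeaderValue::from_maybe_shared(Bytes::from(buf.into_bytes()))
}
```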
@@ -126,7 +126,7 @@ pub enum CacheDirective {
}

impl fmt::Display for CacheDirective {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use self::CacheDirective::*;
        fmt::Display::fmt(
            match *self {

@@ -70,11 +70,17 @@ impl<'a> From<&'a str> for DispositionType {
/// assert_eq!(param.as_filename().unwrap(), "sample.txt");
/// ```
#[derive(Clone, Debug, PartialEq)]
#[allow(clippy::large_enum_variant)]
pub enum DispositionParam {
    /// For [`DispositionType::FormData`] (i.e. *multipart/form-data*), the name of an field from
    /// the form.
    Name(String),
    /// A plain file name.
    ///
    /// It is [not supposed](https://tools.ietf.org/html/rfc6266#appendix-D) to contain any
    /// non-ASCII characters when used in a *Content-Disposition* HTTP response header, where
    /// [`FilenameExt`](DispositionParam::FilenameExt) with charset UTF-8 may be used instead
    /// in case there are Unicode characters in file names.
    Filename(String),
    /// An extended file name. It must not exist for `ContentType::Formdata` according to
    /// [RFC7578 Section 4.2](https://tools.ietf.org/html/rfc7578#section-4.2).

@@ -84,40 +90,40 @@ pub enum DispositionParam {
    /// [RFC6266](https://tools.ietf.org/html/rfc6266) as *token "=" value*. Recipients should
    /// ignore unrecognizable parameters.
    Unknown(String, String),
    /// An unrecognized extended paramater as defined in
    /// An unrecognized extended parameter as defined in
    /// [RFC5987](https://tools.ietf.org/html/rfc5987) as *ext-parameter*, in
    /// [RFC6266](https://tools.ietf.org/html/rfc6266) as *ext-token "=" ext-value*. The single
    /// trailling asterisk is not included. Recipients should ignore unrecognizable parameters.
    /// trailing asterisk is not included. Recipients should ignore unrecognizable parameters.
    UnknownExt(String, ExtendedValue),
}

impl DispositionParam {
    /// Returns `true` if the paramater is [`Name`](DispositionParam::Name).
    /// Returns `true` if the parameter is [`Name`](DispositionParam::Name).
    #[inline]
    pub fn is_name(&self) -> bool {
        self.as_name().is_some()
    }

    /// Returns `true` if the paramater is [`Filename`](DispositionParam::Filename).
    /// Returns `true` if the parameter is [`Filename`](DispositionParam::Filename).
    #[inline]
    pub fn is_filename(&self) -> bool {
        self.as_filename().is_some()
    }

    /// Returns `true` if the paramater is [`FilenameExt`](DispositionParam::FilenameExt).
    /// Returns `true` if the parameter is [`FilenameExt`](DispositionParam::FilenameExt).
    #[inline]
    pub fn is_filename_ext(&self) -> bool {
        self.as_filename_ext().is_some()
    }

    /// Returns `true` if the paramater is [`Unknown`](DispositionParam::Unknown) and the `name`
    /// Returns `true` if the parameter is [`Unknown`](DispositionParam::Unknown) and the `name`
    #[inline]
    /// matches.
    pub fn is_unknown<T: AsRef<str>>(&self, name: T) -> bool {
        self.as_unknown(name).is_some()
    }

    /// Returns `true` if the paramater is [`UnknownExt`](DispositionParam::UnknownExt) and the
    /// Returns `true` if the parameter is [`UnknownExt`](DispositionParam::UnknownExt) and the
    /// `name` matches.
    #[inline]
    pub fn is_unknown_ext<T: AsRef<str>>(&self, name: T) -> bool {

@@ -219,7 +225,16 @@ impl DispositionParam {
/// ext-token = <the characters in token, followed by "*">
/// ```
///
/// **Note**: filename* [must not](https://tools.ietf.org/html/rfc7578#section-4.2) be used within
/// # Note
///
/// filename is [not supposed](https://tools.ietf.org/html/rfc6266#appendix-D) to contain any
/// non-ASCII characters when used in a *Content-Disposition* HTTP response header, where
/// filename* with charset UTF-8 may be used instead in case there are Unicode characters in file
/// names.
/// filename is [acceptable](https://tools.ietf.org/html/rfc7578#section-4.2) to be UTF-8 encoded
/// directly in a *Content-Disposition* header for *multipart/form-data*, though.
///
/// filename* [must not](https://tools.ietf.org/html/rfc7578#section-4.2) be used within
/// *multipart/form-data*.
///
/// # Example

@@ -250,13 +265,29 @@ impl DispositionParam {
/// };
/// assert_eq!(cd2.get_name(), Some("file")); // field name
/// assert_eq!(cd2.get_filename(), Some("bill.odt"));
///
/// // HTTP response header with Unicode characters in file names
/// let cd3 = ContentDisposition {
///     disposition: DispositionType::Attachment,
///     parameters: vec![
///         DispositionParam::FilenameExt(ExtendedValue {
///             charset: Charset::Ext(String::from("UTF-8")),
///             language_tag: None,
///             value: String::from("\u{1f600}.svg").into_bytes(),
///         }),
///         // fallback for better compatibility
///         DispositionParam::Filename(String::from("Grinning-Face-Emoji.svg"))
///     ],
/// };
/// assert_eq!(cd3.get_filename_ext().map(|ev| ev.value.as_ref()),
///            Some("\u{1f600}.svg".as_bytes()));
/// ```
///
/// # WARN
/// # Security Note
///
/// If "filename" parameter is supplied, do not use the file name blindly, check and possibly
/// change to match local file system conventions if applicable, and do not use directory path
/// information that may be present. See [RFC2183](https://tools.ietf.org/html/rfc2183#section-2.3)
/// .
/// information that may be present. See [RFC2183](https://tools.ietf.org/html/rfc2183#section-2.3).
#[derive(Clone, Debug, PartialEq)]
pub struct ContentDisposition {
    /// The disposition type

@@ -332,15 +363,17 @@ impl ContentDisposition {
                // token: won't contains semicolon according to RFC 2616 Section 2.2
                let (token, new_left) = split_once_and_trim(left, ';');
                left = new_left;
                if token.is_empty() {
                    // quoted-string can be empty, but token cannot be empty
                    return Err(crate::error::ParseError::Header);
                }
                token.to_owned()
            };
            if value.is_empty() {
                return Err(crate::error::ParseError::Header);
            }

            let param = if param_name.eq_ignore_ascii_case("name") {
                DispositionParam::Name(value)
            } else if param_name.eq_ignore_ascii_case("filename") {
                // See also comments in test_from_raw_unnecessary_percent_decode.
                DispositionParam::Filename(value)
            } else {
                DispositionParam::Unknown(param_name.to_owned(), value)
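`split_once_and_trim` is an internal helper whose definition is not part of this diff. A hypothetical stand-in that matches how it is used above (split at the first separator, trim both halves, and return the remainder so parsing can continue):

```rust
// Hypothetical; not the crate's code. If `sep` is absent, the second piece is empty.
fn split_once_and_trim(haystack: &str, sep: char) -> (&str, &str) {
    match haystack.find(sep) {
        Some(idx) => (
            haystack[..idx].trim(),
            haystack[idx + sep.len_utf8()..].trim(),
        ),
        None => (haystack.trim(), ""),
    }
}
```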
@@ -354,26 +387,17 @@ impl ContentDisposition {

    /// Returns `true` if it is [`Inline`](DispositionType::Inline).
    pub fn is_inline(&self) -> bool {
        match self.disposition {
            DispositionType::Inline => true,
            _ => false,
        }
        matches!(self.disposition, DispositionType::Inline)
    }

    /// Returns `true` if it is [`Attachment`](DispositionType::Attachment).
    pub fn is_attachment(&self) -> bool {
        match self.disposition {
            DispositionType::Attachment => true,
            _ => false,
        }
        matches!(self.disposition, DispositionType::Attachment)
    }

    /// Returns `true` if it is [`FormData`](DispositionType::FormData).
    pub fn is_form_data(&self) -> bool {
        match self.disposition {
            DispositionType::FormData => true,
            _ => false,
        }
        matches!(self.disposition, DispositionType::FormData)
    }

    /// Returns `true` if it is [`Ext`](DispositionType::Ext) and the `disp_type` matches.

@@ -390,7 +414,7 @@ impl ContentDisposition {

    /// Return the value of *name* if exists.
    pub fn get_name(&self) -> Option<&str> {
        self.parameters.iter().filter_map(|p| p.as_name()).nth(0)
        self.parameters.iter().filter_map(|p| p.as_name()).next()
    }

    /// Return the value of *filename* if exists.

@@ -398,7 +422,7 @@ impl ContentDisposition {
        self.parameters
            .iter()
            .filter_map(|p| p.as_filename())
            .nth(0)
            .next()
    }

    /// Return the value of *filename\** if exists.

@@ -406,7 +430,7 @@ impl ContentDisposition {
        self.parameters
            .iter()
            .filter_map(|p| p.as_filename_ext())
            .nth(0)
            .next()
    }

    /// Return the value of the parameter which the `name` matches.

@@ -415,7 +439,7 @@ impl ContentDisposition {
        self.parameters
            .iter()
            .filter_map(|p| p.as_unknown(name))
            .nth(0)
            .next()
    }

    /// Return the value of the extended parameter which the `name` matches.

@@ -424,17 +448,17 @@ impl ContentDisposition {
        self.parameters
            .iter()
            .filter_map(|p| p.as_unknown_ext(name))
            .nth(0)
            .next()
    }
}

impl IntoHeaderValue for ContentDisposition {
    type Error = header::InvalidHeaderValueBytes;
    type Error = header::InvalidHeaderValue;

    fn try_into(self) -> Result<header::HeaderValue, Self::Error> {
        let mut writer = Writer::new();
        let _ = write!(&mut writer, "{}", self);
        header::HeaderValue::from_shared(writer.take())
        header::HeaderValue::from_maybe_shared(writer.take())
    }
}

@@ -453,7 +477,7 @@ impl Header for ContentDisposition {
}

impl fmt::Display for DispositionType {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            DispositionType::Inline => write!(f, "inline"),
            DispositionType::Attachment => write!(f, "attachment"),

@@ -464,12 +488,41 @@ impl fmt::Display for DispositionType {
}

impl fmt::Display for DispositionParam {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // All ASCII control charaters (0-30, 127) excepting horizontal tab, double quote, and
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // All ASCII control characters (0-30, 127) including horizontal tab, double quote, and
        // backslash should be escaped in quoted-string (i.e. "foobar").
        // Ref: RFC6266 S4.1 -> RFC2616 S2.2; RFC 7578 S4.2 -> RFC2183 S2 -> ... .
        // Ref: RFC6266 S4.1 -> RFC2616 S3.6
        // filename-parm = "filename" "=" value
        // value = token | quoted-string
        // quoted-string = ( <"> *(qdtext | quoted-pair ) <"> )
        // qdtext = <any TEXT except <">>
        // quoted-pair = "\" CHAR
        // TEXT = <any OCTET except CTLs,
        //        but including LWS>
        // LWS = [CRLF] 1*( SP | HT )
        // OCTET = <any 8-bit sequence of data>
        // CHAR = <any US-ASCII character (octets 0 - 127)>
        // CTL = <any US-ASCII control character
        //       (octets 0 - 31) and DEL (127)>
        //
        // Ref: RFC7578 S4.2 -> RFC2183 S2 -> RFC2045 S5.1
        // parameter := attribute "=" value
        // attribute := token
        //              ; Matching of attributes
        //              ; is ALWAYS case-insensitive.
        // value := token / quoted-string
        // token := 1*<any (US-ASCII) CHAR except SPACE, CTLs,
        //             or tspecials>
        // tspecials := "(" / ")" / "<" / ">" / "@" /
        //              "," / ";" / ":" / "\" / <">
        //              "/" / "[" / "]" / "?" / "="
        //              ; Must be in quoted-string,
        //              ; to use within parameter values
        //
        //
        // See also comments in test_from_raw_unnecessary_percent_decode.
        lazy_static! {
            static ref RE: Regex = Regex::new("[\x01-\x08\x10\x1F\x7F\"\\\\]").unwrap();
            static ref RE: Regex = Regex::new("[\x00-\x08\x10-\x1F\x7F\"\\\\]").unwrap();
        }
        match self {
            DispositionParam::Name(ref value) => write!(f, "name={}", value),
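The widened character class above drives escaping of quoted-string parameter values. A hedged sketch of that kind of escaping with the `regex` crate (the real `Display` impl writes straight into the formatter instead of allocating a new `String`):

```rust
use regex::Regex;

// Prefix every matched control character, double quote, and backslash with a
// backslash, then wrap the result in quotes, producing an RFC 2616 quoted-string.
fn quote_value(value: &str) -> String {
    // same character class as the updated RE above
    let re = Regex::new("[\x00-\x08\x10-\x1F\x7F\"\\\\]").unwrap();
    format!("\"{}\"", re.replace_all(value, "\\$0"))
}

fn main() {
    assert_eq!(quote_value(r#"a"b\c"#), "\"a\\\"b\\\\c\"");
}
```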
@@ -493,7 +546,7 @@ impl fmt::Display for DispositionParam {
}

impl fmt::Display for ContentDisposition {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.disposition)?;
        self.parameters
            .iter()

@@ -614,7 +667,7 @@ mod tests {
    fn test_from_raw_unordered() {
        let a = HeaderValue::from_static(
            "form-data; dummy=3; filename=\"sample.png\" ; name=upload;",
            // Actually, a trailling semolocon is not compliant. But it is fine to accept.
            // Actually, a trailing semicolon is not compliant. But it is fine to accept.
        );
        let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
        let b = ContentDisposition {

@@ -706,9 +759,8 @@ mod tests {
        Mainstream browsers like Firefox (gecko) and Chrome use UTF-8 directly as above.
        (And now, only UTF-8 is handled by this implementation.)
        */
        let a =
            HeaderValue::from_str("form-data; name=upload; filename=\"文件.webp\"")
                .unwrap();
        let a = HeaderValue::from_str("form-data; name=upload; filename=\"文件.webp\"")
            .unwrap();
        let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
        let b = ContentDisposition {
            disposition: DispositionType::FormData,

@@ -719,8 +771,10 @@ mod tests {
        };
        assert_eq!(a, b);

        let a =
            HeaderValue::from_str("form-data; name=upload; filename=\"余固知謇謇之為患兮,忍而不能舍也.pptx\"").unwrap();
        let a = HeaderValue::from_str(
            "form-data; name=upload; filename=\"余固知謇謇之為患兮,忍而不能舍也.pptx\"",
        )
        .unwrap();
        let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
        let b = ContentDisposition {
            disposition: DispositionType::FormData,

@@ -770,9 +824,19 @@ mod tests {
    }

    #[test]
    fn test_from_raw_uncessary_percent_decode() {
    fn test_from_raw_unnecessary_percent_decode() {
        // In fact, RFC7578 (multipart/form-data) Section 2 and 4.2 suggests that filename with
        // non-ASCII characters MAY be percent-encoded.
        // On the contrary, RFC6266 or other RFCs related to Content-Disposition response header
        // do not mention such percent-encoding.
        // So, it appears to be undecidable whether to percent-decode or not without
        // knowing the usage scenario (multipart/form-data v.s. HTTP response header) and
        // inevitable to unnecessarily percent-decode filename with %XX in the former scenario.
        // Fortunately, it seems that almost all mainstream browsers just send UTF-8 encoded file
        // names in quoted-string format (tested on Edge, IE11, Chrome and Firefox) without
        // percent-encoding. So we do not bother to attempt to percent-decode.
        let a = HeaderValue::from_static(
            "form-data; name=photo; filename=\"%74%65%73%74%2e%70%6e%67\"", // Should not be decoded!
            "form-data; name=photo; filename=\"%74%65%73%74%2e%70%6e%67\"",
        );
        let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
        let b = ContentDisposition {

@@ -808,6 +872,13 @@ mod tests {

        let a = HeaderValue::from_static("inline; filename= ");
        assert!(ContentDisposition::from_raw(&a).is_err());

        let a = HeaderValue::from_static("inline; filename=\"\"");
        assert!(ContentDisposition::from_raw(&a)
            .expect("parse cd")
            .get_filename()
            .expect("filename")
            .is_empty());
    }

    #[test]

@@ -3,7 +3,7 @@ use std::str::FromStr;

use crate::error::ParseError;
use crate::header::{
    HeaderValue, IntoHeaderValue, InvalidHeaderValueBytes, Writer, CONTENT_RANGE,
    HeaderValue, IntoHeaderValue, InvalidHeaderValue, Writer, CONTENT_RANGE,
};

header! {

@@ -166,7 +166,7 @@ impl FromStr for ContentRangeSpec {
}

impl Display for ContentRangeSpec {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            ContentRangeSpec::Bytes {
                range,

@@ -198,11 +198,11 @@ impl Display for ContentRangeSpec {
}

impl IntoHeaderValue for ContentRangeSpec {
    type Error = InvalidHeaderValueBytes;
    type Error = InvalidHeaderValue;

    fn try_into(self) -> Result<HeaderValue, Self::Error> {
        let mut writer = Writer::new();
        let _ = write!(&mut writer, "{}", self);
        HeaderValue::from_shared(writer.take())
        HeaderValue::from_maybe_shared(writer.take())
    }
}

@@ -3,7 +3,7 @@ use std::fmt::{self, Display, Write};
use crate::error::ParseError;
use crate::header::{
    self, from_one_raw_str, EntityTag, Header, HeaderName, HeaderValue, HttpDate,
    IntoHeaderValue, InvalidHeaderValueBytes, Writer,
    IntoHeaderValue, InvalidHeaderValue, Writer,
};
use crate::httpmessage::HttpMessage;

@@ -87,7 +87,7 @@ impl Header for IfRange {
}

impl Display for IfRange {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            IfRange::EntityTag(ref x) => Display::fmt(x, f),
            IfRange::Date(ref x) => Display::fmt(x, f),

@@ -96,12 +96,12 @@ impl Display for IfRange {
}

impl IntoHeaderValue for IfRange {
    type Error = InvalidHeaderValueBytes;
    type Error = InvalidHeaderValue;

    fn try_into(self) -> Result<HeaderValue, Self::Error> {
        let mut writer = Writer::new();
        let _ = write!(&mut writer, "{}", self);
        HeaderValue::from_shared(writer.take())
        HeaderValue::from_maybe_shared(writer.take())
    }
}
}

Some files were not shown because too many files have changed in this diff.