From 4c4024c949f219d9108f00c8cda759e82c6f81b3 Mon Sep 17 00:00:00 2001 From: Rob Ede Date: Sat, 11 Mar 2023 22:14:58 +0000 Subject: [PATCH 01/12] fix minimal version specs for mime --- actix-files/Cargo.toml | 4 ++-- actix-http-test/Cargo.toml | 4 ++-- actix-http/Cargo.toml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/actix-files/Cargo.toml b/actix-files/Cargo.toml index 4c29c95b2..6909c0ef2 100644 --- a/actix-files/Cargo.toml +++ b/actix-files/Cargo.toml @@ -32,11 +32,11 @@ derive_more = "0.99.5" futures-core = { version = "0.3.17", default-features = false, features = ["alloc"] } http-range = "0.1.4" log = "0.4" -mime = "0.3" +mime = "0.3.9" mime_guess = "2.0.1" percent-encoding = "2.1" pin-project-lite = "0.2.7" -v_htmlescape= "0.15" +v_htmlescape = "0.15.5" # experimental-io-uring [target.'cfg(target_os = "linux")'.dependencies] diff --git a/actix-http-test/Cargo.toml b/actix-http-test/Cargo.toml index cb4754125..12739fbd4 100644 --- a/actix-http-test/Cargo.toml +++ b/actix-http-test/Cargo.toml @@ -42,8 +42,8 @@ futures-core = { version = "0.3.17", default-features = false } http = "0.2.7" log = "0.4" socket2 = "0.4" -serde = "1.0" -serde_json = "1.0" +serde = "1" +serde_json = "1" slab = "0.4" serde_urlencoded = "0.7" tls-openssl = { version = "0.10.9", package = "openssl", optional = true } diff --git a/actix-http/Cargo.toml b/actix-http/Cargo.toml index d2218f6de..07e412a2c 100644 --- a/actix-http/Cargo.toml +++ b/actix-http/Cargo.toml @@ -73,7 +73,7 @@ httparse = "1.5.1" httpdate = "1.0.1" itoa = "1" language-tags = "0.3" -mime = "0.3" +mime = "0.3.4" percent-encoding = "2.1" pin-project-lite = "0.2" smallvec = "1.6.1" From 3fc01c48878ee487b872551d9ac5898fc2031086 Mon Sep 17 00:00:00 2001 From: Rob Ede Date: Sat, 11 Mar 2023 22:17:52 +0000 Subject: [PATCH 02/12] refactor server binding --- actix-web/src/server.rs | 67 +++++++++++++++++++---------------- actix-web/src/types/either.rs | 6 ++-- 2 files changed, 39 insertions(+), 34 deletions(-) diff --git a/actix-web/src/server.rs b/actix-web/src/server.rs index c87fea7f3..1fa279a65 100644 --- a/actix-web/src/server.rs +++ b/actix-web/src/server.rs @@ -338,7 +338,7 @@ where /// # ; Ok(()) } /// ``` pub fn bind(mut self, addrs: A) -> io::Result { - let sockets = self.bind2(addrs)?; + let sockets = bind_addrs(addrs, self.backlog)?; for lst in sockets { self = self.listen(lst)?; @@ -347,33 +347,6 @@ where Ok(self) } - fn bind2(&self, addrs: A) -> io::Result> { - let mut err = None; - let mut success = false; - let mut sockets = Vec::new(); - - for addr in addrs.to_socket_addrs()? { - match create_tcp_listener(addr, self.backlog) { - Ok(lst) => { - success = true; - sockets.push(lst); - } - Err(e) => err = Some(e), - } - } - - if success { - Ok(sockets) - } else if let Some(e) = err.take() { - Err(e) - } else { - Err(io::Error::new( - io::ErrorKind::Other, - "Can not bind to address.", - )) - } - } - /// Resolves socket address(es) and binds server to created listener(s) for TLS connections /// using Rustls. 
/// @@ -386,7 +359,7 @@ where addrs: A, config: RustlsServerConfig, ) -> io::Result { - let sockets = self.bind2(addrs)?; + let sockets = bind_addrs(addrs, self.backlog)?; for lst in sockets { self = self.listen_rustls_inner(lst, config.clone())?; } @@ -404,7 +377,7 @@ where where A: net::ToSocketAddrs, { - let sockets = self.bind2(addrs)?; + let sockets = bind_addrs(addrs, self.backlog)?; let acceptor = openssl_acceptor(builder)?; for lst in sockets { @@ -719,6 +692,38 @@ where } } +/// Bind TCP listeners to socket addresses resolved from `addrs` with options. +fn bind_addrs( + addrs: impl net::ToSocketAddrs, + backlog: u32, +) -> io::Result> { + let mut err = None; + let mut success = false; + let mut sockets = Vec::new(); + + for addr in addrs.to_socket_addrs()? { + match create_tcp_listener(addr, backlog) { + Ok(lst) => { + success = true; + sockets.push(lst); + } + Err(e) => err = Some(e), + } + } + + if success { + Ok(sockets) + } else if let Some(err) = err.take() { + Err(err) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + "Can not bind to address.", + )) + } +} + +/// Creates a TCP listener from socket address and options. fn create_tcp_listener(addr: net::SocketAddr, backlog: u32) -> io::Result { use socket2::{Domain, Protocol, Socket, Type}; let domain = Domain::for_address(addr); @@ -731,7 +736,7 @@ fn create_tcp_listener(addr: net::SocketAddr, backlog: u32) -> io::Result io::Result { builder.set_alpn_select_callback(|_, protocols| { diff --git a/actix-web/src/types/either.rs b/actix-web/src/types/either.rs index 119dd0d62..df93fb5ec 100644 --- a/actix-web/src/types/either.rs +++ b/actix-web/src/types/either.rs @@ -304,7 +304,7 @@ mod tests { #[actix_rt::test] async fn test_either_extract_first_try() { let (req, mut pl) = TestRequest::default() - .set_form(&TestForm { + .set_form(TestForm { hello: "world".to_owned(), }) .to_http_parts(); @@ -320,7 +320,7 @@ mod tests { #[actix_rt::test] async fn test_either_extract_fallback() { let (req, mut pl) = TestRequest::default() - .set_json(&TestForm { + .set_json(TestForm { hello: "world".to_owned(), }) .to_http_parts(); @@ -351,7 +351,7 @@ mod tests { #[actix_rt::test] async fn test_either_extract_recursive_fallback_inner() { let (req, mut pl) = TestRequest::default() - .set_json(&TestForm { + .set_json(TestForm { hello: "world".to_owned(), }) .to_http_parts(); From 0ba147ef71072e7ce780da85a71307e8de138e38 Mon Sep 17 00:00:00 2001 From: Rob Ede Date: Sat, 11 Mar 2023 23:19:03 +0000 Subject: [PATCH 03/12] update actions/checkout to v3 --- .github/workflows/bench.yml | 2 +- .github/workflows/ci-post-merge.yml | 6 +++--- .github/workflows/ci.yml | 8 ++++---- .github/workflows/clippy-fmt.yml | 6 +++--- .github/workflows/coverage.yml | 2 +- .github/workflows/upload-doc.yml | 2 +- 6 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 008c33f89..3d16a7eb7 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Install Rust uses: actions-rs/toolchain@v1 diff --git a/.github/workflows/ci-post-merge.yml b/.github/workflows/ci-post-merge.yml index 7ac6388d4..30d13bf88 100644 --- a/.github/workflows/ci-post-merge.yml +++ b/.github/workflows/ci-post-merge.yml @@ -29,7 +29,7 @@ jobs: CARGO_UNSTABLE_SPARSE_REGISTRY: true steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 # install OpenSSL on Windows # TODO: 
GitHub actions docs state that OpenSSL is @@ -93,7 +93,7 @@ jobs: CARGO_INCREMENTAL: 0 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: dtolnay/rust-toolchain@stable @@ -120,7 +120,7 @@ jobs: CARGO_INCREMENTAL: 0 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: dtolnay/rust-toolchain@stable diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 421becc63..48380265a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -7,7 +7,7 @@ on: branches: [master] permissions: - contents: read # to fetch code (actions/checkout) + contents: read # to fetch code (actions/checkout) jobs: build_and_test: @@ -31,7 +31,7 @@ jobs: VCPKGRS_DYNAMIC: 1 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 # install OpenSSL on Windows # TODO: GitHub actions docs state that OpenSSL is @@ -102,7 +102,7 @@ jobs: name: io-uring tests runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: dtolnay/rust-toolchain@stable @@ -123,7 +123,7 @@ jobs: name: doc tests runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: dtolnay/rust-toolchain@nightly diff --git a/.github/workflows/clippy-fmt.yml b/.github/workflows/clippy-fmt.yml index e94c4d1af..877ca74e4 100644 --- a/.github/workflows/clippy-fmt.yml +++ b/.github/workflows/clippy-fmt.yml @@ -8,7 +8,7 @@ jobs: fmt: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: dtolnay/rust-toolchain@nightly with: { components: rustfmt } - run: cargo fmt --all -- --check @@ -16,7 +16,7 @@ jobs: clippy: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: dtolnay/rust-toolchain@stable with: { components: clippy } @@ -35,7 +35,7 @@ jobs: lint-docs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: dtolnay/rust-toolchain@stable with: { components: rust-docs } diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 137a413d0..bb6d7fb97 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -12,7 +12,7 @@ jobs: name: coverage runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Install stable uses: actions-rs/toolchain@v1 diff --git a/.github/workflows/upload-doc.yml b/.github/workflows/upload-doc.yml index 9aadafafc..2464ebcd6 100644 --- a/.github/workflows/upload-doc.yml +++ b/.github/workflows/upload-doc.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: dtolnay/rust-toolchain@nightly From 4131786127bb1633a2731108195055b52d7a8cf6 Mon Sep 17 00:00:00 2001 From: Rob Ede Date: Sat, 11 Mar 2023 23:20:02 +0000 Subject: [PATCH 04/12] remove old benchmarks --- actix-http/Cargo.toml | 16 -- actix-http/benches/quality-value.rs | 97 ----------- actix-http/benches/status-line.rs | 214 ------------------------- actix-http/benches/uninit-headers.rs | 135 ---------------- actix-http/benches/write-camel-case.rs | 93 ----------- 5 files changed, 555 deletions(-) delete mode 100644 actix-http/benches/quality-value.rs delete mode 100644 actix-http/benches/status-line.rs delete mode 100644 actix-http/benches/uninit-headers.rs delete mode 100644 actix-http/benches/write-camel-case.rs diff --git a/actix-http/Cargo.toml b/actix-http/Cargo.toml index 07e412a2c..ce653f440 100644 --- a/actix-http/Cargo.toml +++ 
b/actix-http/Cargo.toml @@ -124,19 +124,3 @@ tokio = { version = "1.24.2", features = ["net", "rt", "macros"] } [[example]] name = "ws" required-features = ["ws", "rustls"] - -[[bench]] -name = "write-camel-case" -harness = false - -[[bench]] -name = "status-line" -harness = false - -[[bench]] -name = "uninit-headers" -harness = false - -[[bench]] -name = "quality-value" -harness = false diff --git a/actix-http/benches/quality-value.rs b/actix-http/benches/quality-value.rs deleted file mode 100644 index 0ed274ded..000000000 --- a/actix-http/benches/quality-value.rs +++ /dev/null @@ -1,97 +0,0 @@ -#![allow(clippy::uninlined_format_args)] - -use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; - -const CODES: &[u16] = &[0, 1000, 201, 800, 550]; - -fn bench_quality_display_impls(c: &mut Criterion) { - let mut group = c.benchmark_group("quality value display impls"); - - for i in CODES.iter() { - group.bench_with_input(BenchmarkId::new("New (fast?)", i), i, |b, &i| { - b.iter(|| _new::Quality(i).to_string()) - }); - - group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| { - b.iter(|| _naive::Quality(i).to_string()) - }); - } - - group.finish(); -} - -criterion_group!(benches, bench_quality_display_impls); -criterion_main!(benches); - -mod _new { - use std::fmt; - - pub struct Quality(pub(crate) u16); - - impl fmt::Display for Quality { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.0 { - 0 => f.write_str("0"), - 1000 => f.write_str("1"), - - // some number in the range 1–999 - x => { - f.write_str("0.")?; - - // this implementation avoids string allocation otherwise required - // for `.trim_end_matches('0')` - - if x < 10 { - f.write_str("00")?; - // 0 is handled so it's not possible to have a trailing 0, we can just return - itoa_fmt(f, x) - } else if x < 100 { - f.write_str("0")?; - if x % 10 == 0 { - // trailing 0, divide by 10 and write - itoa_fmt(f, x / 10) - } else { - itoa_fmt(f, x) - } - } else { - // x is in range 101–999 - - if x % 100 == 0 { - // two trailing 0s, divide by 100 and write - itoa_fmt(f, x / 100) - } else if x % 10 == 0 { - // one trailing 0, divide by 10 and write - itoa_fmt(f, x / 10) - } else { - itoa_fmt(f, x) - } - } - } - } - } - } - - pub fn itoa_fmt(mut wr: W, value: V) -> fmt::Result { - let mut buf = itoa::Buffer::new(); - wr.write_str(buf.format(value)) - } -} - -mod _naive { - use std::fmt; - - pub struct Quality(pub(crate) u16); - - impl fmt::Display for Quality { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.0 { - 0 => f.write_str("0"), - 1000 => f.write_str("1"), - - x => { - write!(f, "{}", format!("{:03}", x).trim_end_matches('0')) - } - } - } - } -} diff --git a/actix-http/benches/status-line.rs b/actix-http/benches/status-line.rs deleted file mode 100644 index 9fe099478..000000000 --- a/actix-http/benches/status-line.rs +++ /dev/null @@ -1,214 +0,0 @@ -use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; - -use bytes::BytesMut; -use http::Version; - -const CODES: &[u16] = &[201, 303, 404, 515]; - -fn bench_write_status_line_11(c: &mut Criterion) { - let mut group = c.benchmark_group("write_status_line v1.1"); - - let version = Version::HTTP_11; - - for i in CODES.iter() { - group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| { - b.iter(|| { - let mut b = BytesMut::with_capacity(35); - _original::write_status_line(version, i, &mut b); - }) - }); - - group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| { - 
b.iter(|| { - let mut b = BytesMut::with_capacity(35); - _new::write_status_line(version, i, &mut b); - }) - }); - - group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| { - b.iter(|| { - let mut b = BytesMut::with_capacity(35); - _naive::write_status_line(version, i, &mut b); - }) - }); - } - - group.finish(); -} - -fn bench_write_status_line_10(c: &mut Criterion) { - let mut group = c.benchmark_group("write_status_line v1.0"); - - let version = Version::HTTP_10; - - for i in CODES.iter() { - group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| { - b.iter(|| { - let mut b = BytesMut::with_capacity(35); - _original::write_status_line(version, i, &mut b); - }) - }); - - group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| { - b.iter(|| { - let mut b = BytesMut::with_capacity(35); - _new::write_status_line(version, i, &mut b); - }) - }); - - group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| { - b.iter(|| { - let mut b = BytesMut::with_capacity(35); - _naive::write_status_line(version, i, &mut b); - }) - }); - } - - group.finish(); -} - -fn bench_write_status_line_09(c: &mut Criterion) { - let mut group = c.benchmark_group("write_status_line v0.9"); - - let version = Version::HTTP_09; - - for i in CODES.iter() { - group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| { - b.iter(|| { - let mut b = BytesMut::with_capacity(35); - _original::write_status_line(version, i, &mut b); - }) - }); - - group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| { - b.iter(|| { - let mut b = BytesMut::with_capacity(35); - _new::write_status_line(version, i, &mut b); - }) - }); - - group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| { - b.iter(|| { - let mut b = BytesMut::with_capacity(35); - _naive::write_status_line(version, i, &mut b); - }) - }); - } - - group.finish(); -} - -criterion_group!( - benches, - bench_write_status_line_11, - bench_write_status_line_10, - bench_write_status_line_09 -); -criterion_main!(benches); - -mod _naive { - use bytes::{BufMut, BytesMut}; - use http::Version; - - pub(crate) fn write_status_line(version: Version, n: u16, bytes: &mut BytesMut) { - match version { - Version::HTTP_11 => bytes.put_slice(b"HTTP/1.1 "), - Version::HTTP_10 => bytes.put_slice(b"HTTP/1.0 "), - Version::HTTP_09 => bytes.put_slice(b"HTTP/0.9 "), - _ => { - // other HTTP version handlers do not use this method - } - } - - bytes.put_slice(n.to_string().as_bytes()); - } -} - -mod _new { - use bytes::{BufMut, BytesMut}; - use http::Version; - - const DIGITS_START: u8 = b'0'; - - pub(crate) fn write_status_line(version: Version, n: u16, bytes: &mut BytesMut) { - match version { - Version::HTTP_11 => bytes.put_slice(b"HTTP/1.1 "), - Version::HTTP_10 => bytes.put_slice(b"HTTP/1.0 "), - Version::HTTP_09 => bytes.put_slice(b"HTTP/0.9 "), - _ => { - // other HTTP version handlers do not use this method - } - } - - let d100 = (n / 100) as u8; - let d10 = ((n / 10) % 10) as u8; - let d1 = (n % 10) as u8; - - bytes.put_u8(DIGITS_START + d100); - bytes.put_u8(DIGITS_START + d10); - bytes.put_u8(DIGITS_START + d1); - - bytes.put_u8(b' '); - } -} - -mod _original { - use std::ptr; - - use bytes::{BufMut, BytesMut}; - use http::Version; - - const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\ - 2021222324252627282930313233343536373839\ - 4041424344454647484950515253545556575859\ - 6061626364656667686970717273747576777879\ - 8081828384858687888990919293949596979899"; - - pub(crate) const 
STATUS_LINE_BUF_SIZE: usize = 13; - - pub(crate) fn write_status_line(version: Version, mut n: u16, bytes: &mut BytesMut) { - let mut buf: [u8; STATUS_LINE_BUF_SIZE] = *b"HTTP/1.1 "; - - match version { - Version::HTTP_2 => buf[5] = b'2', - Version::HTTP_10 => buf[7] = b'0', - Version::HTTP_09 => { - buf[5] = b'0'; - buf[7] = b'9'; - } - _ => {} - } - - let mut curr: isize = 12; - let buf_ptr = buf.as_mut_ptr(); - let lut_ptr = DEC_DIGITS_LUT.as_ptr(); - let four = n > 999; - - // decode 2 more chars, if > 2 chars - let d1 = (n % 100) << 1; - n /= 100; - curr -= 2; - unsafe { - ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(curr), 2); - } - - // decode last 1 or 2 chars - if n < 10 { - curr -= 1; - unsafe { - *buf_ptr.offset(curr) = (n as u8) + b'0'; - } - } else { - let d1 = n << 1; - curr -= 2; - unsafe { - ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(curr), 2); - } - } - - bytes.put_slice(&buf); - if four { - bytes.put_u8(b' '); - } - } -} diff --git a/actix-http/benches/uninit-headers.rs b/actix-http/benches/uninit-headers.rs deleted file mode 100644 index 688c64d6e..000000000 --- a/actix-http/benches/uninit-headers.rs +++ /dev/null @@ -1,135 +0,0 @@ -use criterion::{criterion_group, criterion_main, Criterion}; - -use bytes::BytesMut; - -// A Miri run detects UB, seen on this playground: -// https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=f5d9aa166aa48df8dca05fce2b6c3915 - -fn bench_header_parsing(c: &mut Criterion) { - c.bench_function("Original (Unsound) [short]", |b| { - b.iter(|| { - let mut buf = BytesMut::from(REQ_SHORT); - _original::parse_headers(&mut buf); - }) - }); - - c.bench_function("New (safe) [short]", |b| { - b.iter(|| { - let mut buf = BytesMut::from(REQ_SHORT); - _new::parse_headers(&mut buf); - }) - }); - - c.bench_function("Original (Unsound) [realistic]", |b| { - b.iter(|| { - let mut buf = BytesMut::from(REQ); - _original::parse_headers(&mut buf); - }) - }); - - c.bench_function("New (safe) [realistic]", |b| { - b.iter(|| { - let mut buf = BytesMut::from(REQ); - _new::parse_headers(&mut buf); - }) - }); -} - -criterion_group!(benches, bench_header_parsing); -criterion_main!(benches); - -const MAX_HEADERS: usize = 96; - -const EMPTY_HEADER_ARRAY: [httparse::Header<'static>; MAX_HEADERS] = - [httparse::EMPTY_HEADER; MAX_HEADERS]; - -#[derive(Clone, Copy)] -struct HeaderIndex { - name: (usize, usize), - value: (usize, usize), -} - -const EMPTY_HEADER_INDEX: HeaderIndex = HeaderIndex { - name: (0, 0), - value: (0, 0), -}; - -const EMPTY_HEADER_INDEX_ARRAY: [HeaderIndex; MAX_HEADERS] = [EMPTY_HEADER_INDEX; MAX_HEADERS]; - -impl HeaderIndex { - fn record(bytes: &[u8], headers: &[httparse::Header<'_>], indices: &mut [HeaderIndex]) { - let bytes_ptr = bytes.as_ptr() as usize; - for (header, indices) in headers.iter().zip(indices.iter_mut()) { - let name_start = header.name.as_ptr() as usize - bytes_ptr; - let name_end = name_start + header.name.len(); - indices.name = (name_start, name_end); - let value_start = header.value.as_ptr() as usize - bytes_ptr; - let value_end = value_start + header.value.len(); - indices.value = (value_start, value_end); - } - } -} - -// test cases taken from: -// https://github.com/seanmonstar/httparse/blob/master/benches/parse.rs - -const REQ_SHORT: &[u8] = b"\ -GET / HTTP/1.0\r\n\ -Host: example.com\r\n\ -Cookie: session=60; user_id=1\r\n\r\n"; - -const REQ: &[u8] = b"\ -GET /wp-content/uploads/2010/03/hello-kitty-darth-vader-pink.jpg HTTP/1.1\r\n\ -Host: 
www.kittyhell.com\r\n\ -User-Agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; ja-JP-mac; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 Pathtraq/0.9\r\n\ -Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n\ -Accept-Language: ja,en-us;q=0.7,en;q=0.3\r\n\ -Accept-Encoding: gzip,deflate\r\n\ -Accept-Charset: Shift_JIS,utf-8;q=0.7,*;q=0.7\r\n\ -Keep-Alive: 115\r\n\ -Connection: keep-alive\r\n\ -Cookie: wp_ozh_wsa_visits=2; wp_ozh_wsa_visit_lasttime=xxxxxxxxxx; __utma=xxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.x; __utmz=xxxxxxxxx.xxxxxxxxxx.x.x.utmccn=(referral)|utmcsr=reader.livedoor.com|utmcct=/reader/|utmcmd=referral|padding=under256\r\n\r\n"; - -mod _new { - use super::*; - - pub fn parse_headers(src: &mut BytesMut) -> usize { - let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY; - let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = EMPTY_HEADER_ARRAY; - - let mut req = httparse::Request::new(&mut parsed); - match req.parse(src).unwrap() { - httparse::Status::Complete(_len) => { - HeaderIndex::record(src, req.headers, &mut headers); - req.headers.len() - } - _ => unreachable!(), - } - } -} - -mod _original { - use super::*; - - use std::mem::MaybeUninit; - - pub fn parse_headers(src: &mut BytesMut) -> usize { - #![allow(invalid_value, clippy::uninit_assumed_init)] - - let mut headers: [HeaderIndex; MAX_HEADERS] = - unsafe { MaybeUninit::uninit().assume_init() }; - - #[allow(invalid_value)] - let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = - unsafe { MaybeUninit::uninit().assume_init() }; - - let mut req = httparse::Request::new(&mut parsed); - match req.parse(src).unwrap() { - httparse::Status::Complete(_len) => { - HeaderIndex::record(src, req.headers, &mut headers); - req.headers.len() - } - _ => unreachable!(), - } - } -} diff --git a/actix-http/benches/write-camel-case.rs b/actix-http/benches/write-camel-case.rs deleted file mode 100644 index ccf09b37e..000000000 --- a/actix-http/benches/write-camel-case.rs +++ /dev/null @@ -1,93 +0,0 @@ -use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; - -fn bench_write_camel_case(c: &mut Criterion) { - let mut group = c.benchmark_group("write_camel_case"); - - let names = ["connection", "Transfer-Encoding", "transfer-encoding"]; - - for &i in &names { - let bts = i.as_bytes(); - - group.bench_with_input(BenchmarkId::new("Original", i), bts, |b, bts| { - b.iter(|| { - let mut buf = black_box([0; 24]); - _original::write_camel_case(black_box(bts), &mut buf) - }); - }); - - group.bench_with_input(BenchmarkId::new("New", i), bts, |b, bts| { - b.iter(|| { - let mut buf = black_box([0; 24]); - let len = black_box(bts.len()); - _new::write_camel_case(black_box(bts), buf.as_mut_ptr(), len) - }); - }); - } - - group.finish(); -} - -criterion_group!(benches, bench_write_camel_case); -criterion_main!(benches); - -mod _new { - pub fn write_camel_case(value: &[u8], buf: *mut u8, len: usize) { - // first copy entire (potentially wrong) slice to output - let buffer = unsafe { - std::ptr::copy_nonoverlapping(value.as_ptr(), buf, len); - std::slice::from_raw_parts_mut(buf, len) - }; - - let mut iter = value.iter(); - - // first character should be uppercase - if let Some(c @ b'a'..=b'z') = iter.next() { - buffer[0] = c & 0b1101_1111; - } - - // track 1 ahead of the current position since that's the location being assigned to - let mut index = 2; - - // remaining characters after hyphens should also be uppercase - while let Some(&c) = iter.next() { - if c == b'-' { - // 
advance iter by one and uppercase if needed - if let Some(c @ b'a'..=b'z') = iter.next() { - buffer[index] = c & 0b1101_1111; - } - } - - index += 1; - } - } -} - -mod _original { - pub fn write_camel_case(value: &[u8], buffer: &mut [u8]) { - let mut index = 0; - let key = value; - let mut key_iter = key.iter(); - - if let Some(c) = key_iter.next() { - if *c >= b'a' && *c <= b'z' { - buffer[index] = *c ^ b' '; - index += 1; - } - } else { - return; - } - - while let Some(c) = key_iter.next() { - buffer[index] = *c; - index += 1; - if *c == b'-' { - if let Some(c) = key_iter.next() { - if *c >= b'a' && *c <= b'z' { - buffer[index] = *c ^ b' '; - index += 1; - } - } - } - } - } -} From 19c9d858f25e8262e14546f430d713addb397e96 Mon Sep 17 00:00:00 2001 From: Rob Ede Date: Sun, 12 Mar 2023 04:29:22 +0000 Subject: [PATCH 05/12] support 16 extractors --- actix-web/CHANGES.md | 8 ++++++++ actix-web/src/extract.rs | 4 ++++ actix-web/src/handler.rs | 5 +++++ 3 files changed, 17 insertions(+) diff --git a/actix-web/CHANGES.md b/actix-web/CHANGES.md index 9a768a122..757e31eeb 100644 --- a/actix-web/CHANGES.md +++ b/actix-web/CHANGES.md @@ -2,10 +2,18 @@ ## Unreleased - 2023-xx-xx +### Added + - Add `Resource::{get, post, etc...}` methods for more concisely adding routes that don't need additional guards. +### Changed + +- Handler functions can now receive up to 16 extractor parameters. + ## 4.3.1 - 2023-02-26 +### Added + - Add support for custom methods with the `#[route]` macro. [#2969] [#2969]: https://github.com/actix/actix-web/pull/2969 diff --git a/actix-web/src/extract.rs b/actix-web/src/extract.rs index d4f5cc91f..84904a9eb 100644 --- a/actix-web/src/extract.rs +++ b/actix-web/src/extract.rs @@ -416,6 +416,10 @@ mod tuple_from_req { tuple_from_req! { TupleFromRequest10; A, B, C, D, E, F, G, H, I, J } tuple_from_req! { TupleFromRequest11; A, B, C, D, E, F, G, H, I, J, K } tuple_from_req! { TupleFromRequest12; A, B, C, D, E, F, G, H, I, J, K, L } + tuple_from_req! { TupleFromRequest13; A, B, C, D, E, F, G, H, I, J, K, L, M } + tuple_from_req! { TupleFromRequest14; A, B, C, D, E, F, G, H, I, J, K, L, M, N } + tuple_from_req! { TupleFromRequest15; A, B, C, D, E, F, G, H, I, J, K, L, M, N, O } + tuple_from_req! { TupleFromRequest16; A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P } } #[cfg(test)] diff --git a/actix-web/src/handler.rs b/actix-web/src/handler.rs index 522a48b82..0c5e58e28 100644 --- a/actix-web/src/handler.rs +++ b/actix-web/src/handler.rs @@ -151,6 +151,10 @@ factory_tuple! { A B C D E F G H I } factory_tuple! { A B C D E F G H I J } factory_tuple! { A B C D E F G H I J K } factory_tuple! { A B C D E F G H I J K L } +factory_tuple! { A B C D E F G H I J K L M } +factory_tuple! { A B C D E F G H I J K L M N } +factory_tuple! { A B C D E F G H I J K L M N O } +factory_tuple! 
{ A B C D E F G H I J K L M N O P } #[cfg(test)] mod tests { @@ -167,6 +171,7 @@ mod tests { async fn handler_max( _01: (), _02: (), _03: (), _04: (), _05: (), _06: (), _07: (), _08: (), _09: (), _10: (), _11: (), _12: (), + _13: (), _14: (), _15: (), _16: (), ) {} assert_impl_handler(handler_min); From dfaca18584cc36c08506cf8433ac2d118e450856 Mon Sep 17 00:00:00 2001 From: Rob Ede Date: Sun, 12 Mar 2023 15:32:07 +0000 Subject: [PATCH 06/12] add compression_responses benchmark (#3001) --- actix-http/Cargo.toml | 5 ++ .../benches/response-body-compression.rs | 90 +++++++++++++++++++ 2 files changed, 95 insertions(+) create mode 100644 actix-http/benches/response-body-compression.rs diff --git a/actix-http/Cargo.toml b/actix-http/Cargo.toml index ce653f440..235e4e980 100644 --- a/actix-http/Cargo.toml +++ b/actix-http/Cargo.toml @@ -124,3 +124,8 @@ tokio = { version = "1.24.2", features = ["net", "rt", "macros"] } [[example]] name = "ws" required-features = ["ws", "rustls"] + +[[bench]] +name = "response-body-compression" +harness = false +required-features = ["compress-brotli", "compress-gzip", "compress-zstd"] diff --git a/actix-http/benches/response-body-compression.rs b/actix-http/benches/response-body-compression.rs new file mode 100644 index 000000000..d128bf75b --- /dev/null +++ b/actix-http/benches/response-body-compression.rs @@ -0,0 +1,90 @@ +#![allow(clippy::uninlined_format_args)] + +use std::convert::Infallible; + +use actix_http::{encoding::Encoder, ContentEncoding, Request, Response, StatusCode}; +use actix_service::{fn_service, Service as _}; +use criterion::{black_box, criterion_group, criterion_main, Criterion}; + +static BODY: &[u8] = include_bytes!("../Cargo.toml"); + +fn compression_responses(c: &mut Criterion) { + let mut group = c.benchmark_group("compression responses"); + + group.bench_function("identity", |b| { + let rt = actix_rt::Runtime::new().unwrap(); + + let identity_svc = fn_service(|_: Request| async move { + let mut res = Response::with_body(StatusCode::OK, ()); + let body = black_box(Encoder::response( + ContentEncoding::Identity, + res.head_mut(), + BODY, + )); + Ok::<_, Infallible>(black_box(res.set_body(black_box(body)))) + }); + + b.iter(|| { + rt.block_on(identity_svc.call(Request::new())).unwrap(); + }); + }); + + group.bench_function("gzip", |b| { + let rt = actix_rt::Runtime::new().unwrap(); + + let identity_svc = fn_service(|_: Request| async move { + let mut res = Response::with_body(StatusCode::OK, ()); + let body = black_box(Encoder::response( + ContentEncoding::Gzip, + res.head_mut(), + BODY, + )); + Ok::<_, Infallible>(black_box(res.set_body(black_box(body)))) + }); + + b.iter(|| { + rt.block_on(identity_svc.call(Request::new())).unwrap(); + }); + }); + + group.bench_function("br", |b| { + let rt = actix_rt::Runtime::new().unwrap(); + + let identity_svc = fn_service(|_: Request| async move { + let mut res = Response::with_body(StatusCode::OK, ()); + let body = black_box(Encoder::response( + ContentEncoding::Brotli, + res.head_mut(), + BODY, + )); + Ok::<_, Infallible>(black_box(res.set_body(black_box(body)))) + }); + + b.iter(|| { + rt.block_on(identity_svc.call(Request::new())).unwrap(); + }); + }); + + group.bench_function("zstd", |b| { + let rt = actix_rt::Runtime::new().unwrap(); + + let identity_svc = fn_service(|_: Request| async move { + let mut res = Response::with_body(StatusCode::OK, ()); + let body = black_box(Encoder::response( + ContentEncoding::Zstd, + res.head_mut(), + BODY, + )); + Ok::<_, 
Infallible>(black_box(res.set_body(black_box(body)))) + }); + + b.iter(|| { + rt.block_on(identity_svc.call(Request::new())).unwrap(); + }); + }); + + group.finish(); +} + +criterion_group!(benches, compression_responses); +criterion_main!(benches); From 9e7a6fe57bfffc36d199c3e729f3470d10a381aa Mon Sep 17 00:00:00 2001 From: Rob Ede Date: Mon, 13 Mar 2023 13:31:48 +0000 Subject: [PATCH 07/12] add `body::to_bytes_limited` (#3000 * add body::to_body_limit * rename to_bytes_limited --- actix-http/CHANGES.md | 5 ++ actix-http/src/body/mod.rs | 2 +- actix-http/src/body/utils.rs | 141 ++++++++++++++++++++++++++++++++--- 3 files changed, 135 insertions(+), 13 deletions(-) diff --git a/actix-http/CHANGES.md b/actix-http/CHANGES.md index 831a0bcd0..aaf84d765 100644 --- a/actix-http/CHANGES.md +++ b/actix-http/CHANGES.md @@ -2,6 +2,11 @@ ## Unreleased - 2023-xx-xx +### Added + +- Add `body::to_body_limit()` function. +- Add `body::BodyLimitExceeded` error type. + ## 3.3.1 - 2023-03-02 ### Fixed diff --git a/actix-http/src/body/mod.rs b/actix-http/src/body/mod.rs index 0fb090eb5..d1708b9d5 100644 --- a/actix-http/src/body/mod.rs +++ b/actix-http/src/body/mod.rs @@ -22,4 +22,4 @@ pub(crate) use self::message_body::MessageBodyMapErr; pub use self::none::None; pub use self::size::BodySize; pub use self::sized_stream::SizedStream; -pub use self::utils::to_bytes; +pub use self::utils::{to_bytes, to_bytes_limited, BodyLimitExceeded}; diff --git a/actix-http/src/body/utils.rs b/actix-http/src/body/utils.rs index 0a6fb0c15..c741eefd2 100644 --- a/actix-http/src/body/utils.rs +++ b/actix-http/src/body/utils.rs @@ -7,71 +7,188 @@ use futures_core::ready; use super::{BodySize, MessageBody}; -/// Collects the body produced by a `MessageBody` implementation into `Bytes`. +/// Collects all the bytes produced by `body`. /// /// Any errors produced by the body stream are returned immediately. /// +/// Consider using [`to_bytes_limited`] instead to protect against memory exhaustion. +/// /// # Examples +/// /// ``` /// use actix_http::body::{self, to_bytes}; /// use bytes::Bytes; /// -/// # async fn test_to_bytes() { +/// # actix_rt::System::new().block_on(async { /// let body = body::None::new(); /// let bytes = to_bytes(body).await.unwrap(); /// assert!(bytes.is_empty()); /// /// let body = Bytes::from_static(b"123"); /// let bytes = to_bytes(body).await.unwrap(); -/// assert_eq!(bytes, b"123"[..]); -/// # } +/// assert_eq!(bytes, "123"); +/// # }); /// ``` pub async fn to_bytes(body: B) -> Result { + to_bytes_limited(body, usize::MAX) + .await + .expect("body should never overflow usize::MAX") +} + +/// Error type returned from [`to_bytes_limited`] when body produced exceeds limit. +#[derive(Debug)] +#[non_exhaustive] +pub struct BodyLimitExceeded; + +/// Collects the bytes produced by `body`, up to `limit` bytes. +/// +/// If a chunk read from `poll_next` causes the total number of bytes read to exceed `limit`, an +/// `Err(BodyLimitExceeded)` is returned. +/// +/// Any errors produced by the body stream are returned immediately as `Ok(Err(B::Error))`. 
+/// +/// # Examples +/// +/// ``` +/// use actix_http::body::{self, to_bytes_limited}; +/// use bytes::Bytes; +/// +/// # actix_rt::System::new().block_on(async { +/// let body = body::None::new(); +/// let bytes = to_bytes_limited(body, 10).await.unwrap().unwrap(); +/// assert!(bytes.is_empty()); +/// +/// let body = Bytes::from_static(b"123"); +/// let bytes = to_bytes_limited(body, 10).await.unwrap().unwrap(); +/// assert_eq!(bytes, "123"); +/// +/// let body = Bytes::from_static(b"123"); +/// assert!(to_bytes_limited(body, 2).await.is_err()); +/// # }); +/// ``` +pub async fn to_bytes_limited( + body: B, + limit: usize, +) -> Result, BodyLimitExceeded> { let cap = match body.size() { - BodySize::None | BodySize::Sized(0) => return Ok(Bytes::new()), + BodySize::None | BodySize::Sized(0) => return Ok(Ok(Bytes::new())), + BodySize::Sized(size) if size as usize > limit => return Err(BodyLimitExceeded), BodySize::Sized(size) => size as usize, // good enough first guess for chunk size BodySize::Stream => 32_768, }; + let mut exceeded_limit = false; let mut buf = BytesMut::with_capacity(cap); pin!(body); - poll_fn(|cx| loop { + match poll_fn(|cx| loop { let body = body.as_mut(); match ready!(body.poll_next(cx)) { - Some(Ok(bytes)) => buf.extend_from_slice(&bytes), + Some(Ok(bytes)) => { + // if limit is exceeded... + if buf.len() + bytes.len() > limit { + // ...set flag to true and break out of poll_fn + exceeded_limit = true; + return Poll::Ready(Ok(())); + } + + buf.extend_from_slice(&bytes) + } None => return Poll::Ready(Ok(())), Some(Err(err)) => return Poll::Ready(Err(err)), } }) - .await?; + .await + { + // propagate error returned from body poll + Err(err) => Ok(Err(err)), - Ok(buf.freeze()) + // limit was exceeded while reading body + Ok(()) if exceeded_limit => Err(BodyLimitExceeded), + + // otherwise return body buffer + Ok(()) => Ok(Ok(buf.freeze())), + } } #[cfg(test)] -mod test { +mod tests { + use std::io; + use futures_util::{stream, StreamExt as _}; use super::*; - use crate::{body::BodyStream, Error}; + use crate::{ + body::{BodyStream, SizedStream}, + Error, + }; #[actix_rt::test] - async fn test_to_bytes() { + async fn to_bytes_complete() { let bytes = to_bytes(()).await.unwrap(); assert!(bytes.is_empty()); let body = Bytes::from_static(b"123"); let bytes = to_bytes(body).await.unwrap(); assert_eq!(bytes, b"123"[..]); + } + #[actix_rt::test] + async fn to_bytes_streams() { let stream = stream::iter(vec![Bytes::from_static(b"123"), Bytes::from_static(b"abc")]) .map(Ok::<_, Error>); let body = BodyStream::new(stream); let bytes = to_bytes(body).await.unwrap(); assert_eq!(bytes, b"123abc"[..]); } + + #[actix_rt::test] + async fn to_bytes_limited_complete() { + let bytes = to_bytes_limited((), 0).await.unwrap().unwrap(); + assert!(bytes.is_empty()); + + let bytes = to_bytes_limited((), 1).await.unwrap().unwrap(); + assert!(bytes.is_empty()); + + assert!(to_bytes_limited(Bytes::from_static(b"12"), 0) + .await + .is_err()); + assert!(to_bytes_limited(Bytes::from_static(b"12"), 1) + .await + .is_err()); + assert!(to_bytes_limited(Bytes::from_static(b"12"), 2).await.is_ok()); + assert!(to_bytes_limited(Bytes::from_static(b"12"), 3).await.is_ok()); + } + + #[actix_rt::test] + async fn to_bytes_limited_streams() { + // hinting a larger body fails + let body = SizedStream::new(8, stream::empty().map(Ok::<_, Error>)); + assert!(to_bytes_limited(body, 3).await.is_err()); + + // hinting a smaller body is okay + let body = SizedStream::new(3, stream::empty().map(Ok::<_, Error>)); + 
assert!(to_bytes_limited(body, 3).await.unwrap().unwrap().is_empty()); + + // hinting a smaller body then returning a larger one fails + let stream = stream::iter(vec![Bytes::from_static(b"1234")]).map(Ok::<_, Error>); + let body = SizedStream::new(3, stream); + assert!(to_bytes_limited(body, 3).await.is_err()); + + let stream = stream::iter(vec![Bytes::from_static(b"123"), Bytes::from_static(b"abc")]) + .map(Ok::<_, Error>); + let body = BodyStream::new(stream); + assert!(to_bytes_limited(body, 3).await.is_err()); + } + + #[actix_rt::test] + async fn to_body_limit_error() { + let err_stream = stream::once(async { Err(io::Error::new(io::ErrorKind::Other, "")) }); + let body = SizedStream::new(8, err_stream); + // not too big, but propagates error from body stream + assert!(to_bytes_limited(body, 10).await.unwrap().is_err()); + } } From 44c5cdaa107e4c5bc220428f44166441144c2238 Mon Sep 17 00:00:00 2001 From: Rob Ede Date: Mon, 13 Mar 2023 13:40:07 +0000 Subject: [PATCH 08/12] bound initial allocation in to_bytes_limited --- actix-http/src/body/utils.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/actix-http/src/body/utils.rs b/actix-http/src/body/utils.rs index c741eefd2..7af5a50ad 100644 --- a/actix-http/src/body/utils.rs +++ b/actix-http/src/body/utils.rs @@ -32,7 +32,7 @@ use super::{BodySize, MessageBody}; pub async fn to_bytes(body: B) -> Result { to_bytes_limited(body, usize::MAX) .await - .expect("body should never overflow usize::MAX") + .expect("body should never yield more than usize::MAX bytes") } /// Error type returned from [`to_bytes_limited`] when body produced exceeds limit. @@ -70,12 +70,14 @@ pub async fn to_bytes_limited( body: B, limit: usize, ) -> Result, BodyLimitExceeded> { + /// Sensible default (32kB) for initial, bounded allocation when collecting body bytes. + const INITIAL_ALLOC_BYTES: usize = 32 * 1024; + let cap = match body.size() { BodySize::None | BodySize::Sized(0) => return Ok(Ok(Bytes::new())), BodySize::Sized(size) if size as usize > limit => return Err(BodyLimitExceeded), - BodySize::Sized(size) => size as usize, - // good enough first guess for chunk size - BodySize::Stream => 32_768, + BodySize::Sized(size) => (size as usize).min(INITIAL_ALLOC_BYTES), + BodySize::Stream => INITIAL_ALLOC_BYTES, }; let mut exceeded_limit = false; From 0e7380659f844d216b206faaae6abd51f9d1aeea Mon Sep 17 00:00:00 2001 From: Rob Ede Date: Mon, 13 Mar 2023 13:40:09 +0000 Subject: [PATCH 09/12] implement Error for BodyLimitExceeded --- actix-http/src/body/utils.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/actix-http/src/body/utils.rs b/actix-http/src/body/utils.rs index 7af5a50ad..d1449179f 100644 --- a/actix-http/src/body/utils.rs +++ b/actix-http/src/body/utils.rs @@ -3,6 +3,7 @@ use std::task::Poll; use actix_rt::pin; use actix_utils::future::poll_fn; use bytes::{Bytes, BytesMut}; +use derive_more::{Display, Error}; use futures_core::ready; use super::{BodySize, MessageBody}; @@ -36,7 +37,8 @@ pub async fn to_bytes(body: B) -> Result { } /// Error type returned from [`to_bytes_limited`] when body produced exceeds limit. 
-#[derive(Debug)] +#[derive(Debug, Display, Error)] +#[display(fmt = "limit exceeded while collecting body bytes")] #[non_exhaustive] pub struct BodyLimitExceeded; From bfdc29ebb8d93f5cc276ed32e81bed37bceb4fa6 Mon Sep 17 00:00:00 2001 From: Rob Ede Date: Mon, 13 Mar 2023 14:22:50 +0000 Subject: [PATCH 10/12] normalize actix-files error messages --- actix-files/src/error.rs | 32 +++++++++++----------- actix-files/src/range.rs | 58 +++++++++++++++++++++++++++++----------- 2 files changed, 58 insertions(+), 32 deletions(-) diff --git a/actix-files/src/error.rs b/actix-files/src/error.rs index 2f3a36cd1..68cea8d89 100644 --- a/actix-files/src/error.rs +++ b/actix-files/src/error.rs @@ -4,46 +4,44 @@ use derive_more::Display; /// Errors which can occur when serving static files. #[derive(Debug, PartialEq, Eq, Display)] pub enum FilesError { - /// Path is not a directory + /// Path is not a directory. #[allow(dead_code)] - #[display(fmt = "Path is not a directory. Unable to serve static files")] + #[display(fmt = "path is not a directory. Unable to serve static files")] IsNotDirectory, - /// Cannot render directory - #[display(fmt = "Unable to render directory without index file")] + /// Cannot render directory. + #[display(fmt = "unable to render directory without index file")] IsDirectory, } -/// Return `NotFound` for `FilesError` impl ResponseError for FilesError { + /// Returns `404 Not Found`. fn status_code(&self) -> StatusCode { StatusCode::NOT_FOUND } } -#[allow(clippy::enum_variant_names)] #[derive(Debug, PartialEq, Eq, Display)] #[non_exhaustive] pub enum UriSegmentError { - /// The segment started with the wrapped invalid character. - #[display(fmt = "The segment started with the wrapped invalid character")] + /// Segment started with the wrapped invalid character. + #[display(fmt = "segment started with invalid character: ('{_0}')")] BadStart(char), - /// The segment contained the wrapped invalid character. - #[display(fmt = "The segment contained the wrapped invalid character")] + /// Segment contained the wrapped invalid character. + #[display(fmt = "segment contained invalid character ('{_0}')")] BadChar(char), - /// The segment ended with the wrapped invalid character. - #[display(fmt = "The segment ended with the wrapped invalid character")] + /// Segment ended with the wrapped invalid character. + #[display(fmt = "segment ended with invalid character: ('{_0}')")] BadEnd(char), - - /// The path is not a valid UTF-8 string after doing percent decoding. - #[display(fmt = "The path is not a valid UTF-8 string after percent-decoding")] - NotValidUtf8, + // /// Path is not a valid UTF-8 string after percent-decoding. + // #[display(fmt = "path is not a valid UTF-8 string after percent-decoding")] + // NotValidUtf8, } -/// Return `BadRequest` for `UriSegmentError` impl ResponseError for UriSegmentError { + /// Returns `400 Bad Request`. fn status_code(&self) -> StatusCode { StatusCode::BAD_REQUEST } diff --git a/actix-files/src/range.rs b/actix-files/src/range.rs index 8d9fe9445..65c680ede 100644 --- a/actix-files/src/range.rs +++ b/actix-files/src/range.rs @@ -1,4 +1,36 @@ -use derive_more::{Display, Error}; +use std::fmt; + +use derive_more::Error; + +/// Copy of `http_range::HttpRangeParseError`. 
+#[derive(Debug, Clone)] +enum HttpRangeParseError { + InvalidRange, + NoOverlap, +} + +impl From for HttpRangeParseError { + fn from(err: http_range::HttpRangeParseError) -> Self { + match err { + http_range::HttpRangeParseError::InvalidRange => Self::InvalidRange, + http_range::HttpRangeParseError::NoOverlap => Self::NoOverlap, + } + } +} + +#[derive(Debug, Clone, Error)] +#[non_exhaustive] +pub struct ParseRangeErr(#[error(not(source))] HttpRangeParseError); + +impl fmt::Display for ParseRangeErr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("invalid Range header: ")?; + f.write_str(match self.0 { + HttpRangeParseError::InvalidRange => "invalid syntax", + HttpRangeParseError::NoOverlap => "range starts after end of content", + }) + } +} /// HTTP Range header representation. #[derive(Debug, Clone, Copy)] @@ -10,26 +42,22 @@ pub struct HttpRange { pub length: u64, } -#[derive(Debug, Clone, Display, Error)] -#[display(fmt = "Parse HTTP Range failed")] -pub struct ParseRangeErr(#[error(not(source))] ()); - impl HttpRange { /// Parses Range HTTP header string as per RFC 2616. /// /// `header` is HTTP Range header (e.g. `bytes=bytes=0-9`). /// `size` is full size of response (file). pub fn parse(header: &str, size: u64) -> Result, ParseRangeErr> { - match http_range::HttpRange::parse(header, size) { - Ok(ranges) => Ok(ranges - .iter() - .map(|range| HttpRange { - start: range.start, - length: range.length, - }) - .collect()), - Err(_) => Err(ParseRangeErr(())), - } + let ranges = http_range::HttpRange::parse(header, size) + .map_err(|err| ParseRangeErr(err.into()))?; + + Ok(ranges + .iter() + .map(|range| HttpRange { + start: range.start, + length: range.length, + }) + .collect()) } } From 442fa279dad32c34556624fdcbad077cd415bd09 Mon Sep 17 00:00:00 2001 From: Rob Ede Date: Mon, 13 Mar 2023 14:30:21 +0000 Subject: [PATCH 11/12] uncomment error variant --- actix-files/src/error.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/actix-files/src/error.rs b/actix-files/src/error.rs index 68cea8d89..d614651fc 100644 --- a/actix-files/src/error.rs +++ b/actix-files/src/error.rs @@ -35,9 +35,10 @@ pub enum UriSegmentError { /// Segment ended with the wrapped invalid character. #[display(fmt = "segment ended with invalid character: ('{_0}')")] BadEnd(char), - // /// Path is not a valid UTF-8 string after percent-decoding. - // #[display(fmt = "path is not a valid UTF-8 string after percent-decoding")] - // NotValidUtf8, + + /// Path is not a valid UTF-8 string after percent-decoding. + #[display(fmt = "path is not a valid UTF-8 string after percent-decoding")] + NotValidUtf8, } impl ResponseError for UriSegmentError { From 5e29726c4fa725b1bf3a0317c57e3f0d1b3e327c Mon Sep 17 00:00:00 2001 From: Rob Ede Date: Mon, 13 Mar 2023 17:17:02 +0000 Subject: [PATCH 12/12] standardize error messages in actix-http --- actix-http/src/error.rs | 66 +++++++++++++-------------- actix-http/src/ws/mod.rs | 32 ++++++------- actix-http/tests/test_ws.rs | 6 +-- actix-web/src/error/response_error.rs | 2 +- 4 files changed, 52 insertions(+), 54 deletions(-) diff --git a/actix-http/src/error.rs b/actix-http/src/error.rs index 41522a254..fa0228a50 100644 --- a/actix-http/src/error.rs +++ b/actix-http/src/error.rs @@ -161,44 +161,44 @@ impl From for Error { #[non_exhaustive] pub enum ParseError { /// An invalid `Method`, such as `GE.T`. 
- #[display(fmt = "Invalid Method specified")] + #[display(fmt = "invalid method specified")] Method, /// An invalid `Uri`, such as `exam ple.domain`. - #[display(fmt = "Uri error: {}", _0)] + #[display(fmt = "URI error: {}", _0)] Uri(InvalidUri), /// An invalid `HttpVersion`, such as `HTP/1.1` - #[display(fmt = "Invalid HTTP version specified")] + #[display(fmt = "invalid HTTP version specified")] Version, /// An invalid `Header`. - #[display(fmt = "Invalid Header provided")] + #[display(fmt = "invalid Header provided")] Header, /// A message head is too large to be reasonable. - #[display(fmt = "Message head is too large")] + #[display(fmt = "message head is too large")] TooLarge, /// A message reached EOF, but is not complete. - #[display(fmt = "Message is incomplete")] + #[display(fmt = "message is incomplete")] Incomplete, /// An invalid `Status`, such as `1337 ELITE`. - #[display(fmt = "Invalid Status provided")] + #[display(fmt = "invalid status provided")] Status, /// A timeout occurred waiting for an IO event. #[allow(dead_code)] - #[display(fmt = "Timeout")] + #[display(fmt = "timeout")] Timeout, - /// An `io::Error` that occurred while trying to read or write to a network stream. - #[display(fmt = "IO error: {}", _0)] + /// An I/O error that occurred while trying to read or write to a network stream. + #[display(fmt = "I/O error: {}", _0)] Io(io::Error), /// Parsing a field as string failed. - #[display(fmt = "UTF8 error: {}", _0)] + #[display(fmt = "UTF-8 error: {}", _0)] Utf8(Utf8Error), } @@ -257,22 +257,19 @@ impl From for Response { #[non_exhaustive] pub enum PayloadError { /// A payload reached EOF, but is not complete. - #[display( - fmt = "A payload reached EOF, but is not complete. Inner error: {:?}", - _0 - )] + #[display(fmt = "payload reached EOF before completing: {:?}", _0)] Incomplete(Option), /// Content encoding stream corruption. - #[display(fmt = "Can not decode content-encoding.")] + #[display(fmt = "can not decode content-encoding")] EncodingCorrupted, /// Payload reached size limit. - #[display(fmt = "Payload reached size limit.")] + #[display(fmt = "payload reached size limit")] Overflow, /// Payload length is unknown. - #[display(fmt = "Payload length is unknown.")] + #[display(fmt = "payload length is unknown")] UnknownLength, /// HTTP/2 payload error. @@ -330,22 +327,23 @@ impl From for Error { #[non_exhaustive] pub enum DispatchError { /// Service error. - #[display(fmt = "Service Error")] + #[display(fmt = "service error")] Service(Response), /// Body streaming error. - #[display(fmt = "Body error: {}", _0)] + #[display(fmt = "body error: {}", _0)] Body(Box), /// Upgrade service error. + #[display(fmt = "upgrade error")] Upgrade, /// An `io::Error` that occurred while trying to read or write to a network stream. - #[display(fmt = "IO error: {}", _0)] + #[display(fmt = "I/O error: {}", _0)] Io(io::Error), /// Request parse error. - #[display(fmt = "Request parse error: {}", _0)] + #[display(fmt = "request parse error: {}", _0)] Parse(ParseError), /// HTTP/2 error. @@ -354,19 +352,19 @@ pub enum DispatchError { H2(h2::Error), /// The first request did not complete within the specified timeout. - #[display(fmt = "The first request did not complete within the specified timeout")] + #[display(fmt = "request did not complete within the specified timeout")] SlowRequestTimeout, - /// Disconnect timeout. Makes sense for ssl streams. - #[display(fmt = "Connection shutdown timeout")] + /// Disconnect timeout. Makes sense for TLS streams. 
+ #[display(fmt = "connection shutdown timeout")] DisconnectTimeout, /// Handler dropped payload before reading EOF. - #[display(fmt = "Handler dropped payload before reading EOF")] + #[display(fmt = "handler dropped payload before reading EOF")] HandlerDroppedPayload, /// Internal error. - #[display(fmt = "Internal error")] + #[display(fmt = "internal error")] InternalError, } @@ -391,12 +389,12 @@ impl StdError for DispatchError { #[cfg_attr(test, derive(PartialEq, Eq))] #[non_exhaustive] pub enum ContentTypeError { - /// Can not parse content type - #[display(fmt = "Can not parse content type")] + /// Can not parse content type. + #[display(fmt = "could not parse content type")] ParseError, - /// Unknown content encoding - #[display(fmt = "Unknown content encoding")] + /// Unknown content encoding. + #[display(fmt = "unknown content encoding")] UnknownEncoding, } @@ -424,7 +422,7 @@ mod tests { let err: Error = ParseError::Io(orig).into(); assert_eq!( format!("{}", err), - "error parsing HTTP message: IO error: other" + "error parsing HTTP message: I/O error: other" ); } @@ -451,7 +449,7 @@ mod tests { let err = PayloadError::Incomplete(None); assert_eq!( err.to_string(), - "A payload reached EOF, but is not complete. Inner error: None" + "payload reached EOF before completing: None" ); } @@ -471,7 +469,7 @@ mod tests { match ParseError::from($from) { e @ $error => { let desc = format!("{}", e); - assert_eq!(desc, format!("IO error: {}", $from)); + assert_eq!(desc, format!("I/O error: {}", $from)); } _ => unreachable!("{:?}", $from), } diff --git a/actix-http/src/ws/mod.rs b/actix-http/src/ws/mod.rs index 75d4ca628..2a0b0a99c 100644 --- a/actix-http/src/ws/mod.rs +++ b/actix-http/src/ws/mod.rs @@ -26,39 +26,39 @@ pub use self::proto::{hash_key, CloseCode, CloseReason, OpCode}; #[derive(Debug, Display, Error, From)] pub enum ProtocolError { /// Received an unmasked frame from client. - #[display(fmt = "Received an unmasked frame from client.")] + #[display(fmt = "received an unmasked frame from client")] UnmaskedFrame, /// Received a masked frame from server. - #[display(fmt = "Received a masked frame from server.")] + #[display(fmt = "received a masked frame from server")] MaskedFrame, /// Encountered invalid opcode. - #[display(fmt = "Invalid opcode: {}.", _0)] + #[display(fmt = "invalid opcode ({})", _0)] InvalidOpcode(#[error(not(source))] u8), /// Invalid control frame length - #[display(fmt = "Invalid control frame length: {}.", _0)] + #[display(fmt = "invalid control frame length ({})", _0)] InvalidLength(#[error(not(source))] usize), /// Bad opcode. - #[display(fmt = "Bad opcode.")] + #[display(fmt = "bad opcode")] BadOpCode, /// A payload reached size limit. - #[display(fmt = "A payload reached size limit.")] + #[display(fmt = "payload reached size limit")] Overflow, - /// Continuation is not started. - #[display(fmt = "Continuation is not started.")] + /// Continuation has not started. + #[display(fmt = "continuation has not started")] ContinuationNotStarted, /// Received new continuation but it is already started. - #[display(fmt = "Received new continuation but it is already started.")] + #[display(fmt = "received new continuation but it has already started")] ContinuationStarted, /// Unknown continuation fragment. - #[display(fmt = "Unknown continuation fragment: {}.", _0)] + #[display(fmt = "unknown continuation fragment: {}", _0)] ContinuationFragment(#[error(not(source))] OpCode), /// I/O error. 
@@ -70,27 +70,27 @@ pub enum ProtocolError { #[derive(Debug, Clone, Copy, PartialEq, Eq, Display, Error)] pub enum HandshakeError { /// Only get method is allowed. - #[display(fmt = "Method not allowed.")] + #[display(fmt = "method not allowed")] GetMethodRequired, /// Upgrade header if not set to WebSocket. - #[display(fmt = "WebSocket upgrade is expected.")] + #[display(fmt = "WebSocket upgrade is expected")] NoWebsocketUpgrade, /// Connection header is not set to upgrade. - #[display(fmt = "Connection upgrade is expected.")] + #[display(fmt = "connection upgrade is expected")] NoConnectionUpgrade, /// WebSocket version header is not set. - #[display(fmt = "WebSocket version header is required.")] + #[display(fmt = "WebSocket version header is required")] NoVersionHeader, /// Unsupported WebSocket version. - #[display(fmt = "Unsupported WebSocket version.")] + #[display(fmt = "unsupported WebSocket version")] UnsupportedVersion, /// WebSocket key is not set or wrong. - #[display(fmt = "Unknown websocket key.")] + #[display(fmt = "unknown WebSocket key")] BadWebsocketKey, } diff --git a/actix-http/tests/test_ws.rs b/actix-http/tests/test_ws.rs index a9c1acd33..a2866613b 100644 --- a/actix-http/tests/test_ws.rs +++ b/actix-http/tests/test_ws.rs @@ -39,13 +39,13 @@ impl WsService { #[derive(Debug, Display, Error, From)] enum WsServiceError { - #[display(fmt = "http error")] + #[display(fmt = "HTTP error")] Http(actix_http::Error), - #[display(fmt = "ws handshake error")] + #[display(fmt = "WS handshake error")] Ws(actix_http::ws::HandshakeError), - #[display(fmt = "io error")] + #[display(fmt = "I/O error")] Io(std::io::Error), #[display(fmt = "dispatcher error")] diff --git a/actix-web/src/error/response_error.rs b/actix-web/src/error/response_error.rs index 7d2c06154..f5d8cf467 100644 --- a/actix-web/src/error/response_error.rs +++ b/actix-web/src/error/response_error.rs @@ -152,7 +152,7 @@ mod tests { let resp_err: &dyn ResponseError = &err; let err = resp_err.downcast_ref::().unwrap(); - assert_eq!(err.to_string(), "Payload reached size limit."); + assert_eq!(err.to_string(), "payload reached size limit"); let not_err = resp_err.downcast_ref::(); assert!(not_err.is_none());
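
Usage note: a minimal sketch of the `body::to_bytes_limited` API introduced in patch 07, assuming the patched `actix-http` plus `actix-rt` (for the runtime macro) and `bytes` as dependencies; the 64 KiB limit is only illustrative. The nested `Result` follows the doc examples above: the outer layer reports the limit being exceeded, the inner layer propagates errors from the body stream itself.

```rust
use actix_http::body::to_bytes_limited;
use bytes::Bytes;

#[actix_rt::main]
async fn main() {
    // A small in-memory body; any `MessageBody` implementor works the same way.
    let body = Bytes::from_static(b"hello world");

    // Collect at most 64 KiB. The outer `Result` signals `BodyLimitExceeded`,
    // the inner `Result` carries any error produced by the body stream.
    match to_bytes_limited(body, 64 * 1024).await {
        Ok(Ok(bytes)) => println!("collected {} bytes", bytes.len()),
        Ok(Err(err)) => eprintln!("body stream error: {err:?}"),
        Err(_) => eprintln!("body exceeded the 64 KiB limit"),
    }
}
```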