mirror of https://github.com/actix/actix-extras.git synced 2024-11-28 01:32:57 +01:00

cleanup h1 parse

Nikolay Kim 2017-12-15 13:10:12 -08:00
parent 71c37bde5a
commit c3d5e4301a
2 changed files with 78 additions and 90 deletions

README.md

@@ -54,7 +54,7 @@ Each result is best of five runs. All measurements are req/sec.
Name | 1 thread | 1 pipeline | 3 thread | 3 pipeline | 8 thread | 8 pipeline
---- | -------- | ---------- | -------- | ---------- | -------- | ----------
Actix | 87.200 | 813.200 | 122.100 | 1.877.000 | 107.400 | 2.390.000
Actix | 89.100 | 815.200 | 122.100 | 1.877.000 | 107.400 | 2.350.000
Gotham | 61.000 | 178.000 | | | |
Iron | | | | | 94.500 | 78.000
Rocket | | | | | 95.500 | failed
@@ -65,7 +65,7 @@ Some notes on the results. Iron and Rocket were tested with 8 threads,
which showed the best results. Gotham and tokio-minihttp do not seem to support
multithreading, or at least I couldn't figure it out. I manually enabled pipelining
for *Shio* and *Gotham*. While Shio seems to support multithreading, it showed
absolutely the same results for any number of threads (maybe macOS?)
absolutely the same results for any number of threads (maybe a macOS problem?)
Rocket completely failed in pipelined tests.
## Examples

src/h1.rs

@@ -215,10 +215,10 @@ impl<T, H> Http1<T, H>
// read incoming data
while !self.flags.contains(Flags::ERROR) && !self.flags.contains(Flags::H2) &&
self.tasks.len() < MAX_PIPELINED_MESSAGES {
match self.reader.parse(self.stream.get_mut(),
&mut self.read_buf, &self.settings)
self.tasks.len() < MAX_PIPELINED_MESSAGES
{
match self.reader.parse(self.stream.get_mut(),
&mut self.read_buf, &self.settings) {
Ok(Async::Ready(Item::Http1(mut req))) => {
not_ready = false;
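
The loop in this hunk (the change here is only brace placement) keeps pulling parsed requests off one connection only while the number of in-flight tasks stays below MAX_PIPELINED_MESSAGES, so a single client cannot queue unbounded work. A minimal, stand-alone sketch of that bounded-intake shape; `MAX_PIPELINED`, `Task`, and `next_request` are placeholders, not the actix-web types:

```rust
use std::collections::VecDeque;

// Placeholder cap, standing in for MAX_PIPELINED_MESSAGES.
const MAX_PIPELINED: usize = 16;

// Stand-in for an in-flight request handler.
struct Task;

// Stand-in for reader.parse(): yields a request once one is fully parsed.
fn next_request() -> Option<Task> {
    None
}

// Keep accepting pipelined requests until the per-connection cap is hit
// or there is nothing more to read right now.
fn fill_pipeline(tasks: &mut VecDeque<Task>) {
    while tasks.len() < MAX_PIPELINED {
        match next_request() {
            Some(task) => tasks.push_back(task),
            None => break,
        }
    }
}

fn main() {
    let mut tasks = VecDeque::new();
    fill_pipeline(&mut tasks);
    println!("queued {} pipelined requests", tasks.len());
}
```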
@@ -405,14 +405,8 @@ impl Reader {
settings: &WorkerSettings<H>) -> Poll<Item, ReaderError>
where T: AsyncRead
{
loop {
match self.decode(buf)? {
Decoding::Paused => return Ok(Async::NotReady),
Decoding::Ready => {
self.payload = None;
break
},
Decoding::NotReady => {
// read payload
if self.payload.is_some() {
match self.read_from_io(io, buf) {
Ok(Async::Ready(0)) => {
if let Some(ref mut payload) = self.payload {
@@ -420,11 +414,7 @@ impl Reader {
}
// http channel should not deal with payload errors
return Err(ReaderError::Payload)
}
Ok(Async::Ready(_)) => {
continue
}
Ok(Async::NotReady) => break,
},
Err(err) => {
if let Some(ref mut payload) = self.payload {
payload.tx.set_error(err.into());
@@ -432,50 +422,41 @@ impl Reader {
// http channel should not deal with payload errors
return Err(ReaderError::Payload)
}
_ => (),
}
match self.decode(buf)? {
Decoding::Ready => self.payload = None,
Decoding::Paused | Decoding::NotReady => return Ok(Async::NotReady),
}
}
// if buf is empty parse_message will always return NotReady, let's avoid that
let read = if buf.is_empty() {
match self.read_from_io(io, buf) {
Ok(Async::Ready(0)) => {
debug!("Ignored premature client disconnection");
return Err(ReaderError::Disconnect);
},
Ok(Async::Ready(_)) => (),
Ok(Async::NotReady) =>
return Ok(Async::NotReady),
Err(err) =>
return Err(ReaderError::Error(err.into()))
}
}
false
} else {
true
};
loop {
match Reader::parse_message(buf, settings).map_err(ReaderError::Error)? {
Message::Http1(msg, decoder) => {
// process payload
if let Some(payload) = decoder {
self.payload = Some(payload);
loop {
match self.decode(buf)? {
Decoding::Paused =>
break,
Decoding::Ready => {
self.payload = None;
break
},
Decoding::NotReady => {
match self.read_from_io(io, buf) {
Ok(Async::Ready(0)) => {
trace!("parse eof");
if let Some(ref mut payload) = self.payload {
payload.tx.set_error(
PayloadError::Incomplete);
}
// http channel should deal with payload errors
return Err(ReaderError::Payload)
}
Ok(Async::Ready(_)) => {
continue
}
Ok(Async::NotReady) => break,
Err(err) => {
if let Some(ref mut payload) = self.payload {
payload.tx.set_error(err.into());
}
// http channel should deal with payload errors
return Err(ReaderError::Payload)
}
}
}
}
Decoding::Paused | Decoding::NotReady => (),
Decoding::Ready => self.payload = None,
}
}
self.h1 = true;
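
The reworked payload handling above turns on the three-way result of `self.decode(buf)`: `Decoding::Ready` means the payload is complete and the decoder can be dropped, while `Decoding::Paused` and `Decoding::NotReady` leave it in place until more data arrives. A toy model of that dispatch, assuming a simplified `Reader` that only tracks whether a payload decoder is still active (not the actix type):

```rust
// Illustrative only: the three-way `Decoding` dispatch used in parse().
enum Decoding {
    Ready,    // payload fully decoded
    Paused,   // decoding paused for now
    NotReady, // need more bytes from the socket
}

// Toy stand-in for the real Reader; only models the payload slot.
struct Reader {
    payload: Option<()>,
}

impl Reader {
    // Returns true when the payload is finished and parsing can move on.
    fn on_decoded(&mut self, state: Decoding) -> bool {
        match state {
            Decoding::Ready => {
                self.payload = None; // drop the decoder, payload is complete
                true
            }
            // both "paused" and "need more data" keep the decoder around
            Decoding::Paused | Decoding::NotReady => false,
        }
    }
}

fn main() {
    let mut r = Reader { payload: Some(()) };
    assert!(!r.on_decoded(Decoding::NotReady));
    assert!(r.on_decoded(Decoding::Ready));
    assert!(r.payload.is_none());
}
```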
@@ -489,11 +470,10 @@ impl Reader {
},
Message::NotReady => {
if buf.capacity() >= MAX_BUFFER_SIZE {
debug!("MAX_BUFFER_SIZE reached, closing");
error!("MAX_BUFFER_SIZE unprocessed data reached, closing");
return Err(ReaderError::Error(ParseError::TooLarge));
}
},
}
if read {
match self.read_from_io(io, buf) {
Ok(Async::Ready(0)) => {
debug!("Ignored premature client disconnection");
@@ -505,26 +485,34 @@ impl Reader {
Err(err) =>
return Err(ReaderError::Error(err.into()))
}
} else {
return Ok(Async::NotReady)
}
},
}
}
}
fn read_from_io<T: AsyncRead>(&mut self, io: &mut T, buf: &mut BytesMut)
-> Poll<usize, io::Error> {
-> Poll<usize, io::Error>
{
unsafe {
if buf.remaining_mut() < LW_BUFFER_SIZE {
buf.reserve(HW_BUFFER_SIZE);
}
unsafe {
let n = match io.read(buf.bytes_mut()) {
Ok(n) => n,
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
return Ok(Async::NotReady);
}
return Err(e)
}
};
match io.read(buf.bytes_mut()) {
Ok(n) => {
buf.advance_mut(n);
Ok(Async::Ready(n))
},
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
Ok(Async::NotReady)
} else {
Err(e)
}
}
}
}
}
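
The reshaped `read_from_io` reserves more space once the buffer runs low, does a single read, and reports `WouldBlock` as `Async::NotReady` instead of an error. A rough equivalent of that pattern over plain `std::io::Read` and a `Vec<u8>`; the constants and the `ReadState` enum are placeholders, and the real code reads into the uninitialized tail of a `BytesMut` inside `unsafe` and then calls `advance_mut`:

```rust
use std::io::{self, Read};

const LW: usize = 1024;     // low-water mark (placeholder, cf. LW_BUFFER_SIZE)
const HW: usize = 8 * 1024; // reserve amount (placeholder, cf. HW_BUFFER_SIZE)

enum ReadState {
    Ready(usize), // n bytes were appended to the buffer
    NotReady,     // the socket returned WouldBlock
}

fn read_some<T: Read>(io: &mut T, buf: &mut Vec<u8>) -> io::Result<ReadState> {
    // make sure there is room before reading, like the reserve() above
    if buf.capacity() - buf.len() < LW {
        buf.reserve(HW);
    }
    let mut chunk = [0u8; 4096];
    match io.read(&mut chunk) {
        Ok(n) => {
            buf.extend_from_slice(&chunk[..n]);
            Ok(ReadState::Ready(n))
        }
        // a non-blocking socket with no data reports WouldBlock;
        // surface that as "not ready" instead of an error
        Err(e) if e.kind() == io::ErrorKind::WouldBlock => Ok(ReadState::NotReady),
        Err(e) => Err(e),
    }
}

fn main() -> io::Result<()> {
    let mut src: &[u8] = b"GET / HTTP/1.1\r\n\r\n";
    let mut buf = Vec::new();
    if let ReadState::Ready(n) = read_some(&mut src, &mut buf)? {
        println!("read {} bytes", n);
    }
    Ok(())
}
```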