
simplify h1 dispatcher

Nikolay Kim 2018-10-01 19:18:24 -07:00
parent 2217a152cb
commit 91af3ca148
8 changed files with 249 additions and 243 deletions
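Reading aid (not part of the commit): the core of the change below is that the HTTP/1 dispatcher's per-request Entry loses its EntryFlags bookkeeping and becomes a plain enum, and a task that has finished writing its response but still has work left is wrapped in the new HttpHandlerTaskFut and handed to the current-thread executor. The following is a minimal, self-contained sketch of that pattern against the futures 0.1 API this code base uses; only the HttpHandlerTask / HttpHandlerTaskFut names come from the diff, the toy task and main are illustrative stand-ins.

// Sketch only, not code from the commit.
extern crate futures;

use futures::{Async, Future, Poll};

/// Stand-in for the real trait: a response task that may still need to be
/// polled after its io work is done.
trait HttpHandlerTask {
    fn poll_completed(&mut self) -> Poll<(), ()>;
}

/// Adapter that turns such a task into a plain future, so the dispatcher can
/// hand it to the executor (tokio_current_thread::spawn in the diff) instead
/// of tracking it with per-entry flags.
struct HttpHandlerTaskFut<T: HttpHandlerTask> {
    task: T,
}

impl<T: HttpHandlerTask> Future for HttpHandlerTaskFut<T> {
    type Item = ();
    type Error = ();

    fn poll(&mut self) -> Poll<(), ()> {
        self.task.poll_completed()
    }
}

/// Toy task that is already finished.
struct DoneTask;

impl HttpHandlerTask for DoneTask {
    fn poll_completed(&mut self) -> Poll<(), ()> {
        Ok(Async::Ready(()))
    }
}

fn main() {
    // In the server this future is spawned; waiting on it here just keeps the
    // sketch runnable as a standalone program.
    HttpHandlerTaskFut { task: DoneTask }.wait().unwrap();
}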

View File

@@ -81,10 +81,6 @@
     specialization, // for impl ErrorResponse for std::error::Error
     extern_prelude,
 ))]
-#![cfg_attr(
-    feature = "cargo-clippy",
-    allow(decimal_literal_representation, suspicious_arithmetic_impl)
-)]
 #![warn(missing_docs)]
 #[macro_use]

View File

@@ -36,10 +36,22 @@ pub enum HttpDispatchError {
     #[fail(display = "The first request did not complete within the specified timeout")]
     SlowRequestTimeout,
+    /// Shutdown timeout
+    #[fail(display = "Connection shutdown timeout")]
+    ShutdownTimeout,
     /// HTTP2 error
     #[fail(display = "HTTP2 error: {}", _0)]
     Http2(http2::Error),
+    /// Malformed request
+    #[fail(display = "Malformed request")]
+    MalformedRequest,
+    /// Internal error
+    #[fail(display = "Internal error")]
+    InternalError,
     /// Unknown error
     #[fail(display = "Unknown error")]
     Unknown,
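Aside (not from the commit): the new variants lean on the failure crate's derive, as the existing ones do. A minimal standalone sketch of that mechanism, with a hypothetical enum name:

#[macro_use]
extern crate failure;

// Display is generated from the #[fail(display = ...)] attribute.
#[derive(Debug, Fail)]
enum DispatchError {
    #[fail(display = "Connection shutdown timeout")]
    ShutdownTimeout,
    #[fail(display = "Malformed request")]
    MalformedRequest,
}

fn main() {
    println!("{}", DispatchError::MalformedRequest);
}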

View File

@@ -4,6 +4,7 @@ use std::time::Instant;

 use bytes::BytesMut;
 use futures::{Async, Future, Poll};
+use tokio_current_thread::spawn;
 use tokio_timer::Delay;

 use error::{Error, PayloadError};
@@ -13,17 +14,16 @@ use payload::{Payload, PayloadStatus, PayloadWriter};
 use super::error::{HttpDispatchError, ServerError};
 use super::h1decoder::{DecoderError, H1Decoder, Message};
 use super::h1writer::H1Writer;
+use super::handler::{HttpHandler, HttpHandlerTask, HttpHandlerTaskFut};
 use super::input::PayloadType;
 use super::settings::WorkerSettings;
-use super::Writer;
-use super::{HttpHandler, HttpHandlerTask, IoStream};
+use super::{IoStream, Writer};

 const MAX_PIPELINED_MESSAGES: usize = 16;

 bitflags! {
     pub struct Flags: u8 {
         const STARTED = 0b0000_0001;
-        const ERROR = 0b0000_0010;
         const KEEPALIVE = 0b0000_0100;
         const SHUTDOWN = 0b0000_1000;
         const READ_DISCONNECTED = 0b0001_0000;
@@ -32,14 +32,6 @@ bitflags! {
     }
 }

-bitflags! {
-    struct EntryFlags: u8 {
-        const EOF = 0b0000_0001;
-        const ERROR = 0b0000_0010;
-        const FINISHED = 0b0000_0100;
-    }
-}
-
 pub(crate) struct Http1<T: IoStream, H: HttpHandler + 'static> {
     flags: Flags,
     settings: WorkerSettings<H>,
@@ -49,39 +41,40 @@ pub(crate) struct Http1<T: IoStream, H: HttpHandler + 'static> {
     payload: Option<PayloadType>,
     buf: BytesMut,
     tasks: VecDeque<Entry<H>>,
-    error: Option<Error>,
+    error: Option<HttpDispatchError>,
     ka_enabled: bool,
     ka_expire: Instant,
     ka_timer: Option<Delay>,
 }

-struct Entry<H: HttpHandler> {
-    pipe: EntryPipe<H>,
-    flags: EntryFlags,
-}
-
-enum EntryPipe<H: HttpHandler> {
+enum Entry<H: HttpHandler> {
     Task(H::Task),
     Error(Box<HttpHandlerTask>),
 }

-impl<H: HttpHandler> EntryPipe<H> {
+impl<H: HttpHandler> Entry<H> {
+    fn into_task(self) -> H::Task {
+        match self {
+            Entry::Task(task) => task,
+            Entry::Error(_) => panic!(),
+        }
+    }
     fn disconnected(&mut self) {
         match *self {
-            EntryPipe::Task(ref mut task) => task.disconnected(),
-            EntryPipe::Error(ref mut task) => task.disconnected(),
+            Entry::Task(ref mut task) => task.disconnected(),
+            Entry::Error(ref mut task) => task.disconnected(),
         }
     }
     fn poll_io(&mut self, io: &mut Writer) -> Poll<bool, Error> {
         match *self {
-            EntryPipe::Task(ref mut task) => task.poll_io(io),
-            EntryPipe::Error(ref mut task) => task.poll_io(io),
+            Entry::Task(ref mut task) => task.poll_io(io),
+            Entry::Error(ref mut task) => task.poll_io(io),
         }
     }
     fn poll_completed(&mut self) -> Poll<(), Error> {
         match *self {
-            EntryPipe::Task(ref mut task) => task.poll_completed(),
-            EntryPipe::Error(ref mut task) => task.poll_completed(),
+            Entry::Task(ref mut task) => task.poll_completed(),
+            Entry::Error(ref mut task) => task.poll_completed(),
         }
     }
 }
@@ -136,10 +129,7 @@ where
     #[inline]
     fn can_read(&self) -> bool {
-        if self
-            .flags
-            .intersects(Flags::ERROR | Flags::READ_DISCONNECTED)
-        {
+        if self.flags.intersects(Flags::READ_DISCONNECTED) {
             return false;
         }
@@ -150,41 +140,46 @@ where
         }
     }

-    fn write_disconnected(&mut self) {
-        self.flags.insert(Flags::WRITE_DISCONNECTED);
-
-        // notify all tasks
-        self.stream.disconnected();
-        for task in &mut self.tasks {
-            task.pipe.disconnected();
-        }
-    }
-
-    fn read_disconnected(&mut self) {
-        self.flags.insert(
-            Flags::READ_DISCONNECTED
-            // on parse error, stop reading stream but tasks need to be
-            // completed
-            | Flags::ERROR,
-        );
-
+    // if checked is set to true, delay disconnect until all tasks have finished.
+    fn client_disconnected(&mut self, checked: bool) {
+        self.flags.insert(Flags::READ_DISCONNECTED);
         if let Some(mut payload) = self.payload.take() {
             payload.set_error(PayloadError::Incomplete);
         }
+
+        if !checked || self.tasks.is_empty() {
+            self.flags.insert(Flags::WRITE_DISCONNECTED);
+            self.stream.disconnected();
+
+            // notify all tasks
+            for mut task in self.tasks.drain(..) {
+                task.disconnected();
+                match task.poll_completed() {
+                    Ok(Async::NotReady) => {
+                        // spawn not completed task, it does not require access to io
+                        // at this point
+                        spawn(HttpHandlerTaskFut::new(task.into_task()));
+                    }
+                    Ok(Async::Ready(_)) => (),
+                    Err(err) => {
+                        error!("Unhandled application error: {}", err);
+                    }
+                }
+            }
+        }
     }

     #[inline]
     pub fn poll(&mut self) -> Poll<(), HttpDispatchError> {
         // check connection keep-alive
-        if !self.poll_keep_alive() {
-            return Ok(Async::Ready(()));
-        }
+        self.poll_keep_alive()?;

         // shutdown
         if self.flags.contains(Flags::SHUTDOWN) {
-            if self.flags.intersects(
-                Flags::ERROR | Flags::READ_DISCONNECTED | Flags::WRITE_DISCONNECTED,
-            ) {
+            if self
+                .flags
+                .intersects(Flags::READ_DISCONNECTED | Flags::WRITE_DISCONNECTED)
+            {
                 return Ok(Async::Ready(()));
             }
             match self.stream.poll_completed(true) {
@@ -197,44 +192,46 @@ where
             }
         }

-        self.poll_io();
+        self.poll_io()?;

-        loop {
+        if !self.flags.contains(Flags::WRITE_DISCONNECTED) {
             match self.poll_handler()? {
-                Async::Ready(true) => {
-                    self.poll_io();
-                }
+                Async::Ready(true) => self.poll(),
                 Async::Ready(false) => {
                     self.flags.insert(Flags::SHUTDOWN);
-                    return self.poll();
+                    self.poll()
                 }
                 Async::NotReady => {
                     // deal with keep-alive and steam eof (client-side write shutdown)
                     if self.tasks.is_empty() {
                         // handle stream eof
-                        if self.flags.contains(Flags::READ_DISCONNECTED) {
-                            self.flags.insert(Flags::SHUTDOWN);
-                            return self.poll();
+                        if self.flags.intersects(
+                            Flags::READ_DISCONNECTED | Flags::WRITE_DISCONNECTED,
+                        ) {
+                            return Ok(Async::Ready(()));
                         }
                         // no keep-alive
-                        if self.flags.contains(Flags::ERROR)
-                            || (!self.flags.contains(Flags::KEEPALIVE)
-                                || !self.ka_enabled)
-                                && self.flags.contains(Flags::STARTED)
+                        if self.flags.contains(Flags::STARTED)
+                            && (!self.ka_enabled
+                                || !self.flags.contains(Flags::KEEPALIVE))
                         {
                             self.flags.insert(Flags::SHUTDOWN);
                             return self.poll();
                         }
                     }
-                    return Ok(Async::NotReady);
+                    Ok(Async::NotReady)
                 }
             }
+        } else if let Some(err) = self.error.take() {
+            Err(err)
+        } else {
+            Ok(Async::Ready(()))
         }
     }

     /// keep-alive timer. returns `true` is keep-alive, otherwise drop
-    fn poll_keep_alive(&mut self) -> bool {
-        let timer = if let Some(ref mut timer) = self.ka_timer {
+    fn poll_keep_alive(&mut self) -> Result<(), HttpDispatchError> {
+        if let Some(ref mut timer) = self.ka_timer {
             match timer.poll() {
                 Ok(Async::Ready(_)) => {
                     if timer.deadline() >= self.ka_expire {
@@ -242,43 +239,39 @@ where
                         if self.tasks.is_empty() {
                             // if we get timer during shutdown, just drop connection
                             if self.flags.contains(Flags::SHUTDOWN) {
-                                return false;
+                                return Err(HttpDispatchError::ShutdownTimeout);
                             } else {
                                 trace!("Keep-alive timeout, close connection");
                                 self.flags.insert(Flags::SHUTDOWN);
-                                None
+                                // TODO: start shutdown timer
+                                return Ok(());
                             }
-                        } else {
-                            self.settings.keep_alive_timer()
-                        }
+                        } else if let Some(deadline) = self.settings.keep_alive_expire()
+                        {
+                            timer.reset(deadline)
+                        }
                     } else {
-                        Some(Delay::new(self.ka_expire))
+                        timer.reset(self.ka_expire)
                     }
                 }
-                Ok(Async::NotReady) => None,
+                Ok(Async::NotReady) => (),
                 Err(e) => {
                     error!("Timer error {:?}", e);
-                    return false;
+                    return Err(HttpDispatchError::Unknown);
                 }
             }
-        } else {
-            None
-        };
-
-        if let Some(mut timer) = timer {
-            let _ = timer.poll();
-            self.ka_timer = Some(timer);
-        }
-        true
+        }
+
+        Ok(())
     }

     #[inline]
     /// read data from stream
-    pub fn poll_io(&mut self) {
+    pub fn poll_io(&mut self) -> Result<(), HttpDispatchError> {
         if !self.flags.contains(Flags::POLLED) {
-            self.parse();
+            self.parse()?;
             self.flags.insert(Flags::POLLED);
-            return;
+            return Ok(());
         }

         // read io from socket
@@ -286,51 +279,28 @@ where
             match self.stream.get_mut().read_available(&mut self.buf) {
                 Ok(Async::Ready((read_some, disconnected))) => {
                     if read_some {
-                        self.parse();
+                        self.parse()?;
                     }
                     if disconnected {
-                        self.read_disconnected();
-                        // delay disconnect until all tasks have finished.
-                        if self.tasks.is_empty() {
-                            self.write_disconnected();
-                        }
+                        self.client_disconnected(true);
                     }
                 }
                 Ok(Async::NotReady) => (),
-                Err(_) => {
-                    self.read_disconnected();
-                    self.write_disconnected();
+                Err(err) => {
+                    self.client_disconnected(false);
+                    return Err(err.into());
                 }
             }
         }
+        Ok(())
     }

     pub fn poll_handler(&mut self) -> Poll<bool, HttpDispatchError> {
         let retry = self.can_read();

-        // check in-flight messages
-        let mut io = false;
-        let mut idx = 0;
-        while idx < self.tasks.len() {
-            // only one task can do io operation in http/1
-            if !io
-                && !self.tasks[idx].flags.contains(EntryFlags::EOF)
-                && !self.flags.contains(Flags::WRITE_DISCONNECTED)
-            {
-                // io is corrupted, send buffer
-                if self.tasks[idx].flags.contains(EntryFlags::ERROR) {
-                    if let Ok(Async::NotReady) = self.stream.poll_completed(true) {
-                        return Ok(Async::NotReady);
-                    }
-                    self.flags.insert(Flags::ERROR);
-                    return Err(self
-                        .error
-                        .take()
-                        .map(|e| e.into())
-                        .unwrap_or(HttpDispatchError::Unknown));
-                }
-
-                match self.tasks[idx].pipe.poll_io(&mut self.stream) {
+        // process first pipelined response, only one task can do io operation in http/1
+        while !self.tasks.is_empty() {
+            match self.tasks[0].poll_io(&mut self.stream) {
                 Ok(Async::Ready(ready)) => {
                     // override keep-alive state
                     if self.stream.keepalive() {
@@ -341,81 +311,86 @@ where
                     // prepare stream for next response
                     self.stream.reset();

-                    if ready {
-                        self.tasks[idx]
-                            .flags
-                            .insert(EntryFlags::EOF | EntryFlags::FINISHED);
-                    } else {
-                        self.tasks[idx].flags.insert(EntryFlags::EOF);
+                    let task = self.tasks.pop_front().unwrap();
+                    if !ready {
+                        // task is done with io operations but still needs to do more work
+                        spawn(HttpHandlerTaskFut::new(task.into_task()));
                     }
                 }
+                // no more IO for this iteration
                 Ok(Async::NotReady) => {
                     // check if we need timer
                     if self.ka_timer.is_some() && self.stream.upgrade() {
                         self.ka_timer.take();
                     }
-                    // check if previously read backpressure was enabled
-                    if self.can_read() && !retry {
+                    // if read-backpressure is enabled and we consumed some data.
+                    // we may read more data
+                    if !retry && self.can_read() {
                         return Ok(Async::Ready(true));
                     }
-                    io = true;
+                    break;
                 }
                 Err(err) => {
                     error!("Unhandled error1: {}", err);
                     // it is not possible to recover from error
                     // during pipe handling, so just drop connection
-                    self.read_disconnected();
-                    self.write_disconnected();
-                    self.tasks[idx].flags.insert(EntryFlags::ERROR);
-                    self.error = Some(err);
-                    continue;
+                    self.client_disconnected(false);
+                    return Err(err.into());
                 }
             }
-            } else if !self.tasks[idx].flags.contains(EntryFlags::FINISHED) {
-                match self.tasks[idx].pipe.poll_completed() {
-                    Ok(Async::NotReady) => (),
-                    Ok(Async::Ready(_)) => {
-                        self.tasks[idx].flags.insert(EntryFlags::FINISHED)
-                    }
-                    Err(err) => {
-                        error!("Unhandled error: {}", err);
-                        self.read_disconnected();
-                        self.write_disconnected();
-                        self.tasks[idx].flags.insert(EntryFlags::ERROR);
-                        self.error = Some(err);
-                        continue;
-                    }
-                }
-            }
-            idx += 1;
         }

-        // cleanup finished tasks
-        while !self.tasks.is_empty() {
-            if self.tasks[0]
-                .flags
-                .contains(EntryFlags::EOF | EntryFlags::FINISHED)
-            {
-                self.tasks.pop_front();
-            } else {
-                break;
-            }
-        }
+        // check in-flight messages. all tasks must be alive,
+        // they need to produce response. if app returned error
+        // and we can not continue processing incoming requests.
+        let mut idx = 1;
+        while idx < self.tasks.len() {
+            let stop = match self.tasks[idx].poll_completed() {
+                Ok(Async::NotReady) => false,
+                Ok(Async::Ready(_)) => true,
+                Err(err) => {
+                    self.error = Some(err.into());
+                    true
+                }
+            };
+            if stop {
+                // error in task handling or task is completed,
+                // so no response for this task which means we can not read more requests
+                // because pipeline sequence is broken.
+                // but we can safely complete existing tasks
+                self.flags.insert(Flags::READ_DISCONNECTED);
+                for mut task in self.tasks.drain(idx..) {
+                    task.disconnected();
+                    match task.poll_completed() {
+                        Ok(Async::NotReady) => {
+                            // spawn not completed task, it does not require access to io
+                            // at this point
+                            spawn(HttpHandlerTaskFut::new(task.into_task()));
+                        }
+                        Ok(Async::Ready(_)) => (),
+                        Err(err) => {
+                            error!("Unhandled application error: {}", err);
+                        }
+                    }
+                }
+                break;
+            } else {
+                idx += 1;
+            }
+        }

-        // check stream state
+        // flush stream
         if self.flags.contains(Flags::STARTED) {
             match self.stream.poll_completed(false) {
                 Ok(Async::NotReady) => return Ok(Async::NotReady),
                 Err(err) => {
                     debug!("Error sending data: {}", err);
-                    self.read_disconnected();
-                    self.write_disconnected();
+                    self.client_disconnected(false);
                     return Err(err.into());
                 }
                 Ok(Async::Ready(_)) => {
-                    // non consumed payload in that case close connection
+                    // if payload is not consumed we can not use connection
                     if self.payload.is_some() && self.tasks.is_empty() {
                         return Ok(Async::Ready(false));
                     }
@@ -427,13 +402,11 @@ where
     }

     fn push_response_entry(&mut self, status: StatusCode) {
-        self.tasks.push_back(Entry {
-            pipe: EntryPipe::Error(ServerError::err(Version::HTTP_11, status)),
-            flags: EntryFlags::empty(),
-        });
+        self.tasks
+            .push_back(Entry::Error(ServerError::err(Version::HTTP_11, status)));
     }

-    pub fn parse(&mut self) {
+    pub fn parse(&mut self) -> Result<(), HttpDispatchError> {
         let mut updated = false;

         'outer: loop {
@@ -457,9 +430,9 @@ where
                     // search handler for request
                     match self.settings.handler().handle(msg) {
-                        Ok(mut pipe) => {
+                        Ok(mut task) => {
                             if self.tasks.is_empty() {
-                                match pipe.poll_io(&mut self.stream) {
+                                match task.poll_io(&mut self.stream) {
                                     Ok(Async::Ready(ready)) => {
                                         // override keep-alive state
                                         if self.stream.keepalive() {
@@ -471,51 +444,38 @@ where
                                         self.stream.reset();

                                         if !ready {
-                                            let item = Entry {
-                                                pipe: EntryPipe::Task(pipe),
-                                                flags: EntryFlags::EOF,
-                                            };
-                                            self.tasks.push_back(item);
+                                            // task is done with io operations
+                                            // but still needs to do more work
+                                            spawn(HttpHandlerTaskFut::new(task));
                                         }
                                         continue 'outer;
                                     }
                                     Ok(Async::NotReady) => (),
                                     Err(err) => {
                                         error!("Unhandled error: {}", err);
-                                        self.flags.insert(Flags::ERROR);
-                                        return;
+                                        self.client_disconnected(false);
+                                        return Err(err.into());
                                     }
                                 }
                             }

-                            self.tasks.push_back(Entry {
-                                pipe: EntryPipe::Task(pipe),
-                                flags: EntryFlags::empty(),
-                            });
+                            self.tasks.push_back(Entry::Task(task));
                             continue 'outer;
                         }
                         Err(_) => {
                             // handler is not found
-                            self.tasks.push_back(Entry {
-                                pipe: EntryPipe::Error(ServerError::err(
-                                    Version::HTTP_11,
-                                    StatusCode::NOT_FOUND,
-                                )),
-                                flags: EntryFlags::empty(),
-                            });
+                            self.push_response_entry(StatusCode::NOT_FOUND);
                         }
                     }
                 }
                 Ok(Some(Message::Chunk(chunk))) => {
                     updated = true;
                     if let Some(ref mut payload) = self.payload {
                         payload.feed_data(chunk);
                     } else {
                         error!("Internal server error: unexpected payload chunk");
-                        self.flags.insert(Flags::ERROR);
+                        self.flags.insert(Flags::READ_DISCONNECTED);
                         self.push_response_entry(StatusCode::INTERNAL_SERVER_ERROR);
+                        self.error = Some(HttpDispatchError::InternalError);
                         break;
                     }
                 }
@@ -525,23 +485,19 @@ where
                         payload.feed_eof();
                     } else {
                         error!("Internal server error: unexpected eof");
-                        self.flags.insert(Flags::ERROR);
+                        self.flags.insert(Flags::READ_DISCONNECTED);
                         self.push_response_entry(StatusCode::INTERNAL_SERVER_ERROR);
+                        self.error = Some(HttpDispatchError::InternalError);
                         break;
                     }
                 }
                 Ok(None) => {
                     if self.flags.contains(Flags::READ_DISCONNECTED) {
-                        self.read_disconnected();
-                        if self.tasks.is_empty() {
-                            self.write_disconnected();
-                        }
+                        self.client_disconnected(true);
                     }
                     break;
                 }
                 Err(e) => {
-                    updated = false;
-                    self.flags.insert(Flags::ERROR);
                     if let Some(mut payload) = self.payload.take() {
                         let e = match e {
                             DecoderError::Io(e) => PayloadError::Io(e),
@@ -550,8 +506,10 @@ where
                         payload.set_error(e);
                     }

-                    //Malformed requests should be responded with 400
+                    // Malformed requests should be responded with 400
                     self.push_response_entry(StatusCode::BAD_REQUEST);
+                    self.flags.insert(Flags::READ_DISCONNECTED);
+                    self.error = Some(HttpDispatchError::MalformedRequest);
                     break;
                 }
             }
@@ -562,6 +520,7 @@ where
                 self.ka_expire = expire;
             }
         }
+        Ok(())
     }
 }
@@ -708,15 +667,15 @@ mod tests {
     #[test]
     fn test_req_parse_err() {
         let mut sys = System::new("test");
-        sys.block_on(future::lazy(|| {
+        let _ = sys.block_on(future::lazy(|| {
             let buf = Buffer::new("GET /test HTTP/1\r\n\r\n");
             let readbuf = BytesMut::new();
             let settings = wrk_settings();

             let mut h1 = Http1::new(settings.clone(), buf, None, readbuf, false, None);
-            h1.poll_io();
-            h1.poll_io();
-            assert!(h1.flags.contains(Flags::ERROR));
+            assert!(h1.poll_io().is_ok());
+            assert!(h1.poll_io().is_ok());
+            assert!(h1.flags.contains(Flags::READ_DISCONNECTED));
             assert_eq!(h1.tasks.len(), 1);
             future::ok::<_, ()>(())
         }));

View File

@@ -18,6 +18,7 @@ pub(crate) struct H1Decoder {
     decoder: Option<EncodingDecoder>,
 }

+#[derive(Debug)]
 pub(crate) enum Message {
     Message { msg: Request, payload: bool },
     Chunk(Bytes),

View File

@@ -1,4 +1,4 @@
-use futures::{Async, Poll};
+use futures::{Async, Future, Poll};

 use super::message::Request;
 use super::Writer;
@@ -42,6 +42,25 @@ impl HttpHandlerTask for Box<HttpHandlerTask> {
     }
 }

+pub(super) struct HttpHandlerTaskFut<T: HttpHandlerTask> {
+    task: T,
+}
+
+impl<T: HttpHandlerTask> HttpHandlerTaskFut<T> {
+    pub(crate) fn new(task: T) -> Self {
+        Self { task }
+    }
+}
+
+impl<T: HttpHandlerTask> Future for HttpHandlerTaskFut<T> {
+    type Item = ();
+    type Error = ();
+
+    fn poll(&mut self) -> Poll<(), ()> {
+        self.task.poll_completed().map_err(|_| ())
+    }
+}
+
 /// Conversion helper trait
 pub trait IntoHttpHandler {
     /// The associated type which is result of conversion.

View File

@@ -485,7 +485,7 @@ impl<H: IntoHttpHandler, F: Fn() -> H + Send + Clone> HttpServer<H, F> {
                 socket.lst,
                 host,
                 socket.addr,
-                self.keep_alive.clone(),
+                self.keep_alive,
                 self.client_timeout,
             );
         }
@@ -531,7 +531,7 @@ impl<H: IntoHttpHandler, F: Fn() -> H + Send + Clone> HttpServer<H, F> {
                 socket.lst,
                 host,
                 socket.addr,
-                self.keep_alive.clone(),
+                self.keep_alive,
                 self.client_timeout,
             );
         }

View File

@@ -41,9 +41,7 @@ where
         // start server
         HttpIncoming::create(move |ctx| {
-            ctx.add_message_stream(
-                stream.map_err(|_| ()).map(move |t| WrapperStream::new(t)),
-            );
+            ctx.add_message_stream(stream.map_err(|_| ()).map(WrapperStream::new));
             HttpIncoming { settings }
         });
     }

View File

@@ -1,5 +1,6 @@
 use std::cell::{Cell, Ref, RefCell, RefMut};
 use std::collections::VecDeque;
+use std::fmt;
 use std::net::SocketAddr;
 use std::rc::Rc;
@@ -220,6 +221,26 @@ impl Request {
     }
 }

+impl fmt::Debug for Request {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        writeln!(
+            f,
+            "\nRequest {:?} {}:{}",
+            self.version(),
+            self.method(),
+            self.path()
+        )?;
+        if let Some(q) = self.uri().query().as_ref() {
+            writeln!(f, "  query: ?{:?}", q)?;
+        }
+        writeln!(f, "  headers:")?;
+        for (key, val) in self.headers().iter() {
+            writeln!(f, "    {:?}: {:?}", key, val)?;
+        }
+        Ok(())
+    }
+}
+
 pub(crate) struct RequestPool(
     RefCell<VecDeque<Rc<InnerRequest>>>,
     RefCell<ServerSettings>,