mirror of https://github.com/fafhrd91/actix-web synced 2024-11-24 16:32:59 +01:00

refactor h1 dispatcher

Nikolay Kim 2018-11-16 21:09:33 -08:00
parent 625469f0f4
commit aa20e2670d
3 changed files with 267 additions and 230 deletions


@@ -125,6 +125,15 @@ impl ClientPayloadCodec {
     }
 }
 
+fn prn_version(ver: Version) -> &'static str {
+    match ver {
+        Version::HTTP_09 => "HTTP/0.9",
+        Version::HTTP_10 => "HTTP/1.0",
+        Version::HTTP_11 => "HTTP/1.1",
+        Version::HTTP_2 => "HTTP/2.0",
+    }
+}
+
 impl ClientCodecInner {
     fn encode_response(
         &mut self,
@@ -135,33 +144,63 @@ impl ClientCodecInner {
         // render message
         {
             // status line
-            writeln!(
+            write!(
                 Writer(buffer),
-                "{} {} {:?}\r",
+                "{} {} {}",
                 msg.method,
                 msg.uri.path_and_query().map(|u| u.as_str()).unwrap_or("/"),
-                msg.version
+                prn_version(msg.version)
             ).map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
 
             // write headers
             buffer.reserve(msg.headers.len() * AVERAGE_HEADER_SIZE);
-            for (key, value) in &msg.headers {
-                let v = value.as_ref();
-                let k = key.as_str().as_bytes();
-                buffer.reserve(k.len() + v.len() + 4);
-                buffer.put_slice(k);
-                buffer.put_slice(b": ");
-                buffer.put_slice(v);
-                buffer.put_slice(b"\r\n");
-
-                // Connection upgrade
-                if key == UPGRADE {
-                    self.flags.insert(Flags::UPGRADE);
-                }
-            }
+
+            // content length
+            let mut len_is_set = true;
+            match btype {
+                BodyType::Sized(len) => {
+                    buffer.extend_from_slice(b"\r\ncontent-length: ");
+                    write!(buffer.writer(), "{}", len)?;
+                    buffer.extend_from_slice(b"\r\n");
+                }
+                BodyType::Unsized => {
+                    buffer.extend_from_slice(b"\r\ntransfer-encoding: chunked\r\n")
+                }
+                BodyType::Zero => {
+                    len_is_set = false;
+                    buffer.extend_from_slice(b"\r\n")
+                }
+                BodyType::None => buffer.extend_from_slice(b"\r\n"),
+            }
+
+            let mut has_date = false;
+            for (key, value) in &msg.headers {
+                match *key {
+                    TRANSFER_ENCODING => continue,
+                    CONTENT_LENGTH => match btype {
+                        BodyType::None => (),
+                        BodyType::Zero => len_is_set = true,
+                        _ => continue,
+                    },
+                    DATE => has_date = true,
+                    UPGRADE => self.flags.insert(Flags::UPGRADE),
+                    _ => (),
+                }
+                buffer.put_slice(key.as_ref());
+                buffer.put_slice(b": ");
+                buffer.put_slice(value.as_ref());
+                buffer.put_slice(b"\r\n");
+            }
+
+            // set content length
+            if !len_is_set {
+                buffer.extend_from_slice(b"content-length: 0\r\n")
+            }
 
             // set date header
-            if !msg.headers.contains_key(DATE) {
+            if !has_date {
                 self.config.set_date(buffer);
             } else {
                 buffer.extend_from_slice(b"\r\n");


@@ -42,10 +42,9 @@ impl<T: MessageTypeDecoder> Decoder for MessageDecoder<T> {
     }
 }
 
 pub(crate) enum PayloadLength {
-    None,
-    Chunked,
+    Payload(PayloadType),
     Upgrade,
-    Length(u64),
+    None,
 }
 
 pub(crate) trait MessageTypeDecoder: Sized {
@@ -55,7 +54,7 @@ pub(crate) trait MessageTypeDecoder: Sized {
 
     fn decode(src: &mut BytesMut) -> Result<Option<(Self, PayloadType)>, ParseError>;
 
-    fn process_headers(
+    fn set_headers(
         &mut self,
         slice: &Bytes,
         version: Version,
@@ -140,10 +139,17 @@ pub(crate) trait MessageTypeDecoder: Sized {
             self.keep_alive();
         }
 
+        // https://tools.ietf.org/html/rfc7230#section-3.3.3
         if chunked {
-            Ok(PayloadLength::Chunked)
+            // Chunked encoding
+            Ok(PayloadLength::Payload(PayloadType::Payload(
+                PayloadDecoder::chunked(),
+            )))
         } else if let Some(len) = content_length {
-            Ok(PayloadLength::Length(len))
+            // Content-Length
+            Ok(PayloadLength::Payload(PayloadType::Payload(
+                PayloadDecoder::length(len),
+            )))
         } else if has_upgrade {
             Ok(PayloadLength::Upgrade)
         } else {
@@ -166,7 +172,7 @@ impl MessageTypeDecoder for Request {
         // performance bump for pipeline benchmarks.
         let mut headers: [HeaderIndex; MAX_HEADERS] = unsafe { mem::uninitialized() };
 
-        let (len, method, uri, version, headers_len) = {
+        let (len, method, uri, ver, h_len) = {
             let mut parsed: [httparse::Header; MAX_HEADERS] =
                 unsafe { mem::uninitialized() };
@@ -189,35 +195,24 @@ impl MessageTypeDecoder for Request {
             }
         };
 
-        // convert headers
         let mut msg = Request::new();
-        let len = msg.process_headers(
-            &src.split_to(len).freeze(),
-            version,
-            &headers[..headers_len],
-        )?;
+
+        // convert headers
+        let len =
+            msg.set_headers(&src.split_to(len).freeze(), ver, &headers[..h_len])?;
 
-        // https://tools.ietf.org/html/rfc7230#section-3.3.3
+        // payload decoder
         let decoder = match len {
-            PayloadLength::Chunked => {
-                // Chunked encoding
-                PayloadType::Payload(PayloadDecoder::chunked())
-            }
-            PayloadLength::Length(len) => {
-                // Content-Length
-                PayloadType::Payload(PayloadDecoder::length(len))
-            }
+            PayloadLength::Payload(pl) => pl,
             PayloadLength::Upgrade => {
-                // upgrade(websocket) or connect
+                // upgrade(websocket)
                 PayloadType::Stream(PayloadDecoder::eof())
             }
             PayloadLength::None => {
                 if method == Method::CONNECT {
-                    // upgrade(websocket) or connect
                     PayloadType::Stream(PayloadDecoder::eof())
                 } else if src.len() >= MAX_BUFFER_SIZE {
-                    error!("MAX_BUFFER_SIZE unprocessed data reached, closing");
+                    trace!("MAX_BUFFER_SIZE unprocessed data reached, closing");
                     return Err(ParseError::TooLarge);
                 } else {
                     PayloadType::None
@@ -230,7 +225,7 @@ impl MessageTypeDecoder for Request {
             inner.url.update(&uri);
             inner.head.uri = uri;
             inner.head.method = method;
-            inner.head.version = version;
+            inner.head.version = ver;
         }
 
         Ok(Some((msg, decoder)))
@@ -251,7 +246,7 @@ impl MessageTypeDecoder for ClientResponse {
         // performance bump for pipeline benchmarks.
         let mut headers: [HeaderIndex; MAX_HEADERS] = unsafe { mem::uninitialized() };
 
-        let (len, version, status, headers_len) = {
+        let (len, ver, status, h_len) = {
             let mut parsed: [httparse::Header; MAX_HEADERS] =
                 unsafe { mem::uninitialized() };
@@ -276,37 +271,26 @@ impl MessageTypeDecoder for ClientResponse {
         let mut msg = ClientResponse::new();
         // convert headers
-        let len = msg.process_headers(
-            &src.split_to(len).freeze(),
-            version,
-            &headers[..headers_len],
-        )?;
+        let len =
+            msg.set_headers(&src.split_to(len).freeze(), ver, &headers[..h_len])?;
 
-        // https://tools.ietf.org/html/rfc7230#section-3.3.3
-        let decoder = match len {
-            PayloadLength::Chunked => {
-                // Chunked encoding
-                PayloadType::Payload(PayloadDecoder::chunked())
-            }
-            PayloadLength::Length(len) => {
-                // Content-Length
-                PayloadType::Payload(PayloadDecoder::length(len))
-            }
-            _ => {
-                if status == StatusCode::SWITCHING_PROTOCOLS {
-                    // switching protocol or connect
-                    PayloadType::Stream(PayloadDecoder::eof())
-                } else if src.len() >= MAX_BUFFER_SIZE {
-                    error!("MAX_BUFFER_SIZE unprocessed data reached, closing");
-                    return Err(ParseError::TooLarge);
-                } else {
-                    PayloadType::None
-                }
+        // message payload
+        let decoder = if let PayloadLength::Payload(pl) = len {
+            pl
+        } else {
+            if status == StatusCode::SWITCHING_PROTOCOLS {
+                // switching protocol or connect
+                PayloadType::Stream(PayloadDecoder::eof())
+            } else if src.len() >= MAX_BUFFER_SIZE {
+                error!("MAX_BUFFER_SIZE unprocessed data reached, closing");
+                return Err(ParseError::TooLarge);
+            } else {
+                PayloadType::None
             }
         };
 
         msg.head.status = status;
-        msg.head.version = Some(version);
+        msg.head.version = Some(ver);
 
         Ok(Some((msg, decoder)))
     }
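The decoder refactor above folds the chunked/content-length decision into `PayloadLength::Payload(PayloadType)`, so `decode()` no longer re-matches on header state. A self-contained sketch of the header-driven decision per RFC 7230 §3.3.3; the simplified `BodyKind` enum and `payload_kind` function are illustrative stand-ins, not the crate's API:

// Simplified stand-in for PayloadLength/PayloadDecoder: decide how a message
// body will be read, following https://tools.ietf.org/html/rfc7230#section-3.3.3.
#[derive(Debug, PartialEq)]
enum BodyKind {
    Chunked,     // Transfer-Encoding: chunked
    Length(u64), // Content-Length: n
    Upgrade,     // connection is being upgraded, read until EOF
    None,        // no body
}

fn payload_kind(chunked: bool, content_length: Option<u64>, has_upgrade: bool) -> BodyKind {
    if chunked {
        BodyKind::Chunked
    } else if let Some(len) = content_length {
        BodyKind::Length(len)
    } else if has_upgrade {
        BodyKind::Upgrade
    } else {
        BodyKind::None
    }
}

fn main() {
    assert_eq!(payload_kind(false, Some(42), false), BodyKind::Length(42));
    assert_eq!(payload_kind(true, None, false), BodyKind::Chunked);
    assert_eq!(payload_kind(false, None, true), BodyKind::Upgrade);
}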


@@ -1,11 +1,12 @@
 use std::collections::VecDeque;
 use std::fmt::Debug;
+use std::mem;
 use std::time::Instant;
 
 use actix_net::codec::Framed;
 use actix_net::service::Service;
-use futures::{Async, AsyncSink, Future, Poll, Sink, Stream};
+use futures::{Async, Future, Poll, Sink, Stream};
 use tokio_io::{AsyncRead, AsyncWrite};
 use tokio_timer::Delay;
@@ -37,12 +38,19 @@ bitflags! {
 
 /// Dispatcher for HTTP/1.1 protocol
 pub struct Dispatcher<T, S: Service>
+where
+    S::Error: Debug,
+{
+    inner: Option<InnerDispatcher<T, S>>,
+}
+
+struct InnerDispatcher<T, S: Service>
 where
     S::Error: Debug,
 {
     service: S,
     flags: Flags,
-    framed: Option<Framed<T, Codec>>,
+    framed: Framed<T, Codec>,
     error: Option<DispatchError<S::Error>>,
     config: ServiceConfig,
@@ -63,7 +71,6 @@ enum DispatcherMessage {
 
 enum State<S: Service> {
     None,
     ServiceCall(S::Future),
-    SendResponse(Option<(Message<Response>, Body)>),
     SendPayload(BodyStream),
 }
@@ -113,20 +120,29 @@
         };
 
         Dispatcher {
-            payload: None,
-            state: State::None,
-            error: None,
-            messages: VecDeque::new(),
-            framed: Some(framed),
-            unhandled: None,
-            service,
-            flags,
-            config,
-            ka_expire,
-            ka_timer,
+            inner: Some(InnerDispatcher {
+                framed,
+                payload: None,
+                state: State::None,
+                error: None,
+                messages: VecDeque::new(),
+                unhandled: None,
+                service,
+                flags,
+                config,
+                ka_expire,
+                ka_timer,
+            }),
         }
     }
+}
+
+impl<T, S> InnerDispatcher<T, S>
+where
+    T: AsyncRead + AsyncWrite,
+    S: Service<Request = Request, Response = Response>,
+    S::Error: Debug,
+{
     fn can_read(&self) -> bool {
         if self.flags.contains(Flags::DISCONNECTED) {
             return false;
@@ -150,7 +166,7 @@
     /// Flush stream
     fn poll_flush(&mut self) -> Poll<(), DispatchError<S::Error>> {
         if !self.flags.contains(Flags::FLUSHED) {
-            match self.framed.as_mut().unwrap().poll_complete() {
+            match self.framed.poll_complete() {
                 Ok(Async::NotReady) => Ok(Async::NotReady),
                 Err(err) => {
                     debug!("Error sending data: {}", err);
@@ -170,90 +186,82 @@
         }
     }
 
+    fn send_response(
+        &mut self,
+        message: Response,
+        body: Body,
+    ) -> Result<State<S>, DispatchError<S::Error>> {
+        self.framed
+            .force_send(Message::Item(message))
+            .map_err(|err| {
+                if let Some(mut payload) = self.payload.take() {
+                    payload.set_error(PayloadError::Incomplete(None));
+                }
+                DispatchError::Io(err)
+            })?;
+
+        self.flags
+            .set(Flags::KEEPALIVE, self.framed.get_codec().keepalive());
+        self.flags.remove(Flags::FLUSHED);
+
+        match body {
+            Body::Empty => Ok(State::None),
+            Body::Streaming(stream) => Ok(State::SendPayload(stream)),
+            Body::Binary(mut bin) => {
+                self.flags.remove(Flags::FLUSHED);
+                self.framed.force_send(Message::Chunk(Some(bin.take())))?;
+                self.framed.force_send(Message::Chunk(None))?;
+                Ok(State::None)
+            }
+        }
+    }
+
     fn poll_response(&mut self) -> Result<(), DispatchError<S::Error>> {
         let mut retry = self.can_read();
 
-        // process
         loop {
-            let state = match self.state {
-                State::None => if let Some(msg) = self.messages.pop_front() {
-                    match msg {
-                        DispatcherMessage::Item(req) => Some(self.handle_request(req)?),
-                        DispatcherMessage::Error(res) => Some(State::SendResponse(
-                            Some((Message::Item(res), Body::Empty)),
-                        )),
+            let state = match mem::replace(&mut self.state, State::None) {
+                State::None => match self.messages.pop_front() {
+                    Some(DispatcherMessage::Item(req)) => {
+                        Some(self.handle_request(req)?)
                     }
-                } else {
-                    None
+                    Some(DispatcherMessage::Error(res)) => {
+                        Some(self.send_response(res, Body::Empty)?)
+                    }
+                    None => None,
                 },
-                // call inner service
-                State::ServiceCall(ref mut fut) => {
+                State::ServiceCall(mut fut) => {
                     match fut.poll().map_err(DispatchError::Service)? {
                         Async::Ready(mut res) => {
-                            self.framed
-                                .as_mut()
-                                .unwrap()
-                                .get_codec_mut()
-                                .prepare_te(&mut res);
+                            self.framed.get_codec_mut().prepare_te(&mut res);
                             let body = res.replace_body(Body::Empty);
-                            Some(State::SendResponse(Some((Message::Item(res), body))))
+                            Some(self.send_response(res, body)?)
                         }
-                        Async::NotReady => None,
-                    }
-                }
-                // send respons
-                State::SendResponse(ref mut item) => {
-                    let (msg, body) = item.take().expect("SendResponse is empty");
-                    let framed = self.framed.as_mut().unwrap();
-                    match framed.start_send(msg) {
-                        Ok(AsyncSink::Ready) => {
-                            self.flags
-                                .set(Flags::KEEPALIVE, framed.get_codec().keepalive());
-                            self.flags.remove(Flags::FLUSHED);
-                            match body {
-                                Body::Empty => Some(State::None),
-                                Body::Streaming(stream) => {
-                                    Some(State::SendPayload(stream))
-                                }
-                                Body::Binary(mut bin) => {
-                                    self.flags.remove(Flags::FLUSHED);
-                                    framed
-                                        .force_send(Message::Chunk(Some(bin.take())))?;
-                                    framed.force_send(Message::Chunk(None))?;
-                                    Some(State::None)
-                                }
-                            }
-                        }
-                        Ok(AsyncSink::NotReady(msg)) => {
-                            *item = Some((msg, body));
-                            return Ok(());
-                        }
-                        Err(err) => {
-                            if let Some(mut payload) = self.payload.take() {
-                                payload.set_error(PayloadError::Incomplete(None));
-                            }
-                            return Err(DispatchError::Io(err));
+                        Async::NotReady => {
+                            self.state = State::ServiceCall(fut);
+                            None
                         }
                     }
                 }
-                // Send payload
-                State::SendPayload(ref mut stream) => {
-                    let mut framed = self.framed.as_mut().unwrap();
+                State::SendPayload(mut stream) => {
                     loop {
-                        if !framed.is_write_buf_full() {
+                        if !self.framed.is_write_buf_full() {
                             match stream.poll().map_err(|_| DispatchError::Unknown)? {
                                 Async::Ready(Some(item)) => {
                                     self.flags.remove(Flags::FLUSHED);
-                                    framed.force_send(Message::Chunk(Some(item)))?;
+                                    self.framed
+                                        .force_send(Message::Chunk(Some(item)))?;
                                     continue;
                                 }
                                 Async::Ready(None) => {
                                     self.flags.remove(Flags::FLUSHED);
-                                    framed.force_send(Message::Chunk(None))?;
+                                    self.framed.force_send(Message::Chunk(None))?;
+                                }
+                                Async::NotReady => {
+                                    self.state = State::SendPayload(stream);
+                                    return Ok(());
                                 }
-                                Async::NotReady => return Ok(()),
                             }
                         } else {
+                            self.state = State::SendPayload(stream);
                             return Ok(());
                         }
                         break;
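The rewritten loop above takes the state out with `mem::replace(&mut self.state, State::None)` so each arm owns its future or stream for the duration of the poll, and puts it back only when it has to return early. A minimal sketch of that ownership pattern outside the dispatcher; the `Step` and `Machine` names are made up purely for illustration:

use std::mem;

enum Step {
    Idle,
    Working(Vec<u32>), // stands in for a future/stream that the state owns
}

struct Machine {
    state: Step,
}

impl Machine {
    fn poll_once(&mut self) {
        // Take ownership of the current state, leaving a cheap placeholder behind.
        match mem::replace(&mut self.state, Step::Idle) {
            Step::Idle => {}
            Step::Working(mut items) => {
                if let Some(item) = items.pop() {
                    println!("processed {}", item);
                    // Not finished yet: put the (possibly modified) state back.
                    self.state = Step::Working(items);
                }
                // When the vec is drained we simply stay Idle.
            }
        }
    }
}

fn main() {
    let mut m = Machine { state: Step::Working(vec![1, 2, 3]) };
    m.poll_once();
    m.poll_once();
}
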
@@ -266,7 +274,7 @@
                 Some(state) => self.state = state,
                 None => {
                     // if read-backpressure is enabled and we consumed some data.
-                    // we may read more dataand retry
+                    // we may read more data and retry
                     if !retry && self.can_read() && self.poll_request()? {
                         retry = self.can_read();
                         continue;
@@ -286,13 +294,9 @@
         let mut task = self.service.call(req);
         match task.poll().map_err(DispatchError::Service)? {
             Async::Ready(mut res) => {
-                self.framed
-                    .as_mut()
-                    .unwrap()
-                    .get_codec_mut()
-                    .prepare_te(&mut res);
+                self.framed.get_codec_mut().prepare_te(&mut res);
                 let body = res.replace_body(Body::Empty);
-                Ok(State::SendResponse(Some((Message::Item(res), body))))
+                self.send_response(res, body)
             }
             Async::NotReady => Ok(State::ServiceCall(task)),
         }
@@ -307,20 +311,14 @@
         let mut updated = false;
         loop {
-            match self.framed.as_mut().unwrap().poll() {
+            match self.framed.poll() {
                 Ok(Async::Ready(Some(msg))) => {
                     updated = true;
                     self.flags.insert(Flags::STARTED);
 
                     match msg {
                         Message::Item(req) => {
-                            match self
-                                .framed
-                                .as_ref()
-                                .unwrap()
-                                .get_codec()
-                                .message_type()
-                            {
+                            match self.framed.get_codec().message_type() {
                                 MessageType::Payload => {
                                     let (ps, pl) = Payload::new(false);
                                     *req.inner.payload.borrow_mut() = Some(pl);
@@ -406,52 +404,58 @@
     /// keep-alive timer
     fn poll_keepalive(&mut self) -> Result<(), DispatchError<S::Error>> {
-        if let Some(ref mut timer) = self.ka_timer {
-            match timer.poll() {
-                Ok(Async::Ready(_)) => {
-                    // if we get timer during shutdown, just drop connection
-                    if self.flags.contains(Flags::SHUTDOWN) {
-                        return Err(DispatchError::DisconnectTimeout);
-                    } else if timer.deadline() >= self.ka_expire {
-                        // check for any outstanding response processing
-                        if self.state.is_empty() && self.flags.contains(Flags::FLUSHED) {
-                            if self.flags.contains(Flags::STARTED) {
-                                trace!("Keep-alive timeout, close connection");
-                                self.flags.insert(Flags::SHUTDOWN);
-
-                                // start shutdown timer
-                                if let Some(deadline) =
-                                    self.config.client_disconnect_timer()
-                                {
-                                    timer.reset(deadline);
-                                    let _ = timer.poll();
-                                } else {
-                                    return Ok(());
-                                }
-                            } else {
-                                // timeout on first request (slow request) return 408
-                                trace!("Slow request timeout");
-                                self.flags.insert(Flags::STARTED | Flags::DISCONNECTED);
-                                self.state = State::SendResponse(Some((
-                                    Message::Item(Response::RequestTimeout().finish()),
-                                    Body::Empty,
-                                )));
-                            }
-                        } else if let Some(deadline) = self.config.keep_alive_expire() {
-                            timer.reset(deadline);
-                            let _ = timer.poll();
-                        }
-                    } else {
-                        timer.reset(self.ka_expire);
-                        let _ = timer.poll();
-                    }
-                }
-                Ok(Async::NotReady) => (),
-                Err(e) => {
-                    error!("Timer error {:?}", e);
-                    return Err(DispatchError::Unknown);
-                }
-            }
-        }
+        if self.ka_timer.is_some() {
+            return Ok(());
+        }
+        match self.ka_timer.as_mut().unwrap().poll().map_err(|e| {
+            error!("Timer error {:?}", e);
+            DispatchError::Unknown
+        })? {
+            Async::Ready(_) => {
+                // if we get timeout during shutdown, drop connection
+                if self.flags.contains(Flags::SHUTDOWN) {
+                    return Err(DispatchError::DisconnectTimeout);
+                } else if self.ka_timer.as_mut().unwrap().deadline() >= self.ka_expire {
+                    // check for any outstanding response processing
+                    if self.state.is_empty() && self.flags.contains(Flags::FLUSHED) {
+                        if self.flags.contains(Flags::STARTED) {
+                            trace!("Keep-alive timeout, close connection");
+                            self.flags.insert(Flags::SHUTDOWN);
+
+                            // start shutdown timer
+                            if let Some(deadline) = self.config.client_disconnect_timer()
+                            {
+                                self.ka_timer.as_mut().map(|timer| {
+                                    timer.reset(deadline);
+                                    let _ = timer.poll();
+                                });
+                            } else {
+                                return Ok(());
+                            }
+                        } else {
+                            // timeout on first request (slow request) return 408
+                            trace!("Slow request timeout");
+                            self.flags.insert(Flags::STARTED | Flags::DISCONNECTED);
+                            self.state = self.send_response(
+                                Response::RequestTimeout().finish(),
+                                Body::Empty,
+                            )?;
+                        }
+                    } else if let Some(deadline) = self.config.keep_alive_expire() {
+                        self.ka_timer.as_mut().map(|timer| {
+                            timer.reset(deadline);
+                            let _ = timer.poll();
+                        });
+                    }
+                } else {
+                    let expire = self.ka_expire;
+                    self.ka_timer.as_mut().map(|timer| {
+                        timer.reset(expire);
+                        let _ = timer.poll();
+                    });
+                }
+            }
+            Async::NotReady => (),
+        }
 
         Ok(())
@@ -469,43 +473,53 @@
     #[inline]
     fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
-        if self.flags.contains(Flags::SHUTDOWN) {
-            self.poll_keepalive()?;
-            try_ready!(self.poll_flush());
-            let io = self.framed.take().unwrap().into_inner();
-            Ok(Async::Ready(H1ServiceResult::Shutdown(io)))
-        } else {
-            self.poll_keepalive()?;
-            self.poll_request()?;
-            self.poll_response()?;
-            self.poll_flush()?;
-
-            // keep-alive and stream errors
-            if self.state.is_empty() && self.flags.contains(Flags::FLUSHED) {
-                if let Some(err) = self.error.take() {
-                    Err(err)
-                } else if self.flags.contains(Flags::DISCONNECTED) {
-                    Ok(Async::Ready(H1ServiceResult::Disconnected))
-                }
-                // unhandled request (upgrade or connect)
-                else if self.unhandled.is_some() {
-                    let req = self.unhandled.take().unwrap();
-                    let framed = self.framed.take().unwrap();
-                    Ok(Async::Ready(H1ServiceResult::Unhandled(req, framed)))
-                }
-                // disconnect if keep-alive is not enabled
-                else if self.flags.contains(Flags::STARTED) && !self
-                    .flags
-                    .intersects(Flags::KEEPALIVE | Flags::KEEPALIVE_ENABLED)
-                {
-                    let io = self.framed.take().unwrap().into_inner();
-                    Ok(Async::Ready(H1ServiceResult::Shutdown(io)))
-                } else {
-                    Ok(Async::NotReady)
-                }
-            } else {
-                Ok(Async::NotReady)
-            }
-        }
+        let shutdown = if let Some(ref mut inner) = self.inner {
+            if inner.flags.contains(Flags::SHUTDOWN) {
+                inner.poll_keepalive()?;
+                try_ready!(inner.poll_flush());
+                true
+            } else {
+                inner.poll_keepalive()?;
+                inner.poll_request()?;
+                inner.poll_response()?;
+                inner.poll_flush()?;
+
+                // keep-alive and stream errors
+                if inner.state.is_empty() && inner.flags.contains(Flags::FLUSHED) {
+                    if let Some(err) = inner.error.take() {
+                        return Err(err);
+                    } else if inner.flags.contains(Flags::DISCONNECTED) {
+                        return Ok(Async::Ready(H1ServiceResult::Disconnected));
+                    }
+                    // unhandled request (upgrade or connect)
+                    else if inner.unhandled.is_some() {
+                        false
+                    }
+                    // disconnect if keep-alive is not enabled
+                    else if inner.flags.contains(Flags::STARTED) && !inner
+                        .flags
+                        .intersects(Flags::KEEPALIVE | Flags::KEEPALIVE_ENABLED)
+                    {
+                        true
+                    } else {
+                        return Ok(Async::NotReady);
+                    }
+                } else {
+                    return Ok(Async::NotReady);
+                }
+            }
+        } else {
+            unreachable!()
+        };
+
+        let mut inner = self.inner.take().unwrap();
+        if shutdown {
+            Ok(Async::Ready(H1ServiceResult::Shutdown(
+                inner.framed.into_inner(),
+            )))
+        } else {
+            let req = inner.unhandled.take().unwrap();
+            Ok(Async::Ready(H1ServiceResult::Unhandled(req, inner.framed)))
+        }
     }
 }
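The `Dispatcher { inner: Option<InnerDispatcher<..>> }` split introduced above lets the terminal branch of `poll` move the whole inner state, including the framed I/O, out by value once a final result is ready. A standalone sketch of that `Option::take` hand-off, assuming made-up `Outer`/`Inner`/`Outcome` types that only illustrate the shape of the pattern:

struct Inner {
    io: String, // stands in for the Framed<T, Codec> that must be returned by value
}

struct Outer {
    inner: Option<Inner>,
}

enum Outcome {
    NotReady,
    Shutdown(String),
}

impl Outer {
    fn poll(&mut self) -> Outcome {
        // Borrow the inner state while work is still in progress...
        let done = match self.inner {
            Some(ref inner) => !inner.io.is_empty(),
            None => return Outcome::NotReady,
        };
        if done {
            // ...and move it out by value only when producing the final result.
            let inner = self.inner.take().unwrap();
            Outcome::Shutdown(inner.io)
        } else {
            Outcome::NotReady
        }
    }
}

fn main() {
    let mut d = Outer { inner: Some(Inner { io: "socket".to_string() }) };
    match d.poll() {
        Outcome::Shutdown(io) => println!("got back {}", io),
        Outcome::NotReady => println!("still running"),
    }
}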