
enable slow request timeout for h2 dispatcher

Nikolay Kim 2018-10-08 07:47:42 -07:00
parent 1e1a4f846e
commit cfad5bf1f3
4 changed files with 78 additions and 74 deletions

View File

@@ -203,7 +203,7 @@ where
     #[inline]
     pub fn poll(&mut self) -> Poll<(), HttpDispatchError> {
         // check connection keep-alive
-        self.poll_keep_alive()?;
+        self.poll_keepalive()?;

         // shutdown
         if self.flags.contains(Flags::SHUTDOWN) {
@@ -277,23 +277,21 @@ where
     }

     /// keep-alive timer. returns `true` is keep-alive, otherwise drop
-    fn poll_keep_alive(&mut self) -> Result<(), HttpDispatchError> {
+    fn poll_keepalive(&mut self) -> Result<(), HttpDispatchError> {
         if let Some(ref mut timer) = self.ka_timer {
             match timer.poll() {
                 Ok(Async::Ready(_)) => {
-                    if timer.deadline() >= self.ka_expire {
-                        // check for any outstanding request handling
-                        if self.tasks.is_empty() {
-                            // if we get timer during shutdown, just drop connection
-                            if self.flags.contains(Flags::SHUTDOWN) {
-                                let io = self.stream.get_mut();
-                                let _ = IoStream::set_linger(
-                                    io,
-                                    Some(Duration::from_secs(0)),
-                                );
-                                let _ = IoStream::shutdown(io, Shutdown::Both);
-                                return Err(HttpDispatchError::ShutdownTimeout);
-                            } else if !self.flags.contains(Flags::STARTED) {
+                    // if we get timer during shutdown, just drop connection
+                    if self.flags.contains(Flags::SHUTDOWN) {
+                        let io = self.stream.get_mut();
+                        let _ = IoStream::set_linger(io, Some(Duration::from_secs(0)));
+                        let _ = IoStream::shutdown(io, Shutdown::Both);
+                        return Err(HttpDispatchError::ShutdownTimeout);
+                    }
+                    if timer.deadline() >= self.ka_expire {
+                        // check for any outstanding request handling
+                        if self.tasks.is_empty() {
+                            if !self.flags.contains(Flags::STARTED) {
                                 // timeout on first request (slow request) return 408
                                 trace!("Slow request timeout");
                                 self.flags
@@ -315,9 +313,8 @@ where
                                     return Ok(());
                                 }
                             }
-                        } else if let Some(deadline) = self.settings.keep_alive_expire()
-                        {
-                            timer.reset(deadline)
+                        } else if let Some(dl) = self.settings.keep_alive_expire() {
+                            timer.reset(dl)
                         }
                     } else {
                         timer.reset(self.ka_expire)
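
The reworked `poll_keepalive` above checks the shutdown flag before comparing the timer deadline against `ka_expire`, and answers a connection that never started a request with 408. A minimal, self-contained sketch of that decision order (plain Rust standing in for the dispatcher internals; `TimerAction` and `on_timer_fired` are invented names for illustration, not actix-web API):

use std::time::Instant;

/// Invented summary of what the h1 dispatcher does when its keep-alive timer fires.
#[derive(Debug, PartialEq)]
enum TimerAction {
    ForceClose,      // fired while shutting down: linger(0), drop the socket
    SlowRequest408,  // expired before the first request ever started: answer 408
    CloseIdle,       // expired with nothing in flight: close the connection
    Rearm(Instant),  // fired early or requests still running: reset the timer
}

fn on_timer_fired(
    shutting_down: bool,
    request_started: bool,
    tasks_empty: bool,
    deadline: Instant,
    ka_expire: Instant,
    next_expire: Instant,
) -> TimerAction {
    // the shutdown check now comes first, regardless of the keep-alive deadline
    if shutting_down {
        return TimerAction::ForceClose;
    }
    if deadline >= ka_expire {
        if tasks_empty {
            if !request_started {
                // slow request: connection opened, but no request arrived in time
                return TimerAction::SlowRequest408;
            }
            return TimerAction::CloseIdle;
        }
        // requests still in flight: push the expiry out again
        return TimerAction::Rearm(next_expire);
    }
    // timer fired before the current expiry: re-arm it to the real expiry
    TimerAction::Rearm(ka_expire)
}

fn main() {
    let now = Instant::now();
    // timer reaches its deadline with no request started -> slow request, 408
    assert_eq!(
        on_timer_fired(false, false, true, now, now, now),
        TimerAction::SlowRequest408
    );
    println!("slow request handled with 408");
}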

View File

@@ -27,7 +27,8 @@ use super::{HttpHandler, HttpHandlerTask, IoStream, Writer};

 bitflags! {
     struct Flags: u8 {
-        const DISCONNECTED = 0b0000_0010;
+        const DISCONNECTED = 0b0000_0001;
+        const SHUTDOWN = 0b0000_0010;
     }
 }
@@ -42,8 +43,9 @@ where
     addr: Option<SocketAddr>,
     state: State<IoWrapper<T>>,
     tasks: VecDeque<Entry<H>>,
-    keepalive_timer: Option<Delay>,
     extensions: Option<Rc<Extensions>>,
+    ka_expire: Instant,
+    ka_timer: Option<Delay>,
 }

 enum State<T: AsyncRead + AsyncWrite> {
@@ -62,6 +64,16 @@ where
     ) -> Self {
         let addr = io.peer_addr();
         let extensions = io.extensions();

+        // keep-alive timeout
+        let (ka_expire, ka_timer) = if let Some(delay) = keepalive_timer {
+            (delay.deadline(), Some(delay))
+        } else if let Some(delay) = settings.keep_alive_timer() {
+            (delay.deadline(), Some(delay))
+        } else {
+            (settings.now(), None)
+        };
+
         Http2 {
             flags: Flags::empty(),
             tasks: VecDeque::new(),
@@ -72,14 +84,14 @@ where
             addr,
             settings,
             extensions,
-            keepalive_timer,
+            ka_expire,
+            ka_timer,
         }
     }

     pub(crate) fn shutdown(&mut self) {
         self.state = State::Empty;
         self.tasks.clear();
-        self.keepalive_timer.take();
     }

     pub fn settings(&self) -> &ServiceConfig<H> {
@@ -87,21 +99,16 @@ where
     }

     pub fn poll(&mut self) -> Poll<(), HttpDispatchError> {
+        self.poll_keepalive()?;
+
         // server
         if let State::Connection(ref mut conn) = self.state {
-            // keep-alive timer
-            if let Some(ref mut timeout) = self.keepalive_timer {
-                match timeout.poll() {
-                    Ok(Async::Ready(_)) => {
-                        trace!("Keep-alive timeout, close connection");
-                        return Ok(Async::Ready(()));
-                    }
-                    Ok(Async::NotReady) => (),
-                    Err(_) => unreachable!(),
-                }
-            }
-
             loop {
+                // shutdown connection
+                if self.flags.contains(Flags::SHUTDOWN) {
+                    return conn.poll_close().map_err(|e| e.into());
+                }
+
                 let mut not_ready = true;
                 let disconnected = self.flags.contains(Flags::DISCONNECTED);
@@ -216,8 +223,12 @@ where
                             not_ready = false;
                             let (parts, body) = req.into_parts();

-                            // stop keepalive timer
-                            self.keepalive_timer.take();
+                            // update keep-alive expire
+                            if self.ka_timer.is_some() {
+                                if let Some(expire) = self.settings.keep_alive_expire() {
+                                    self.ka_expire = expire;
+                                }
+                            }

                             self.tasks.push_back(Entry::new(
                                 parts,
@@ -228,36 +239,14 @@ where
                                 self.extensions.clone(),
                             ));
                         }
-                        Ok(Async::NotReady) => {
-                            // start keep-alive timer
-                            if self.tasks.is_empty() {
-                                if self.settings.keep_alive_enabled() {
-                                    if self.keepalive_timer.is_none() {
-                                        if let Some(ka) = self.settings.keep_alive() {
-                                            trace!("Start keep-alive timer");
-                                            let mut timeout =
-                                                Delay::new(Instant::now() + ka);
-                                            // register timeout
-                                            let _ = timeout.poll();
-                                            self.keepalive_timer = Some(timeout);
-                                        }
-                                    }
-                                } else {
-                                    // keep-alive disable, drop connection
-                                    return conn.poll_close().map_err(|e| e.into());
-                                }
-                            } else {
-                                // keep-alive unset, rely on operating system
-                                return Ok(Async::NotReady);
-                            }
-                        }
+                        Ok(Async::NotReady) => return Ok(Async::NotReady),
                         Err(err) => {
                             trace!("Connection error: {}", err);
-                            self.flags.insert(Flags::DISCONNECTED);
+                            self.flags.insert(Flags::SHUTDOWN);
                             for entry in &mut self.tasks {
                                 entry.task.disconnected()
                             }
-                            self.keepalive_timer.take();
+                            continue;
                         }
                     }
                 }
@@ -289,6 +278,37 @@ where
         self.poll()
     }

+    /// keep-alive timer. returns `true` is keep-alive, otherwise drop
+    fn poll_keepalive(&mut self) -> Result<(), HttpDispatchError> {
+        if let Some(ref mut timer) = self.ka_timer {
+            match timer.poll() {
+                Ok(Async::Ready(_)) => {
+                    // if we get timer during shutdown, just drop connection
+                    if self.flags.contains(Flags::SHUTDOWN) {
+                        return Err(HttpDispatchError::ShutdownTimeout);
+                    }
+                    if timer.deadline() >= self.ka_expire {
+                        // check for any outstanding request handling
+                        if self.tasks.is_empty() {
+                            return Err(HttpDispatchError::ShutdownTimeout);
+                        } else if let Some(dl) = self.settings.keep_alive_expire() {
+                            timer.reset(dl)
+                        }
+                    } else {
+                        timer.reset(self.ka_expire)
+                    }
+                }
+                Ok(Async::NotReady) => (),
+                Err(e) => {
+                    error!("Timer error {:?}", e);
+                    return Err(HttpDispatchError::Unknown);
+                }
+            }
+        }
+
+        Ok(())
+    }
 }

 bitflags! {
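
Taken together, the h2 changes mirror the h1 dispatcher's keep-alive bookkeeping: the expiry starts from the handshake's slow-request timer (or the configured keep-alive timer), each accepted request pushes it forward, and `poll_keepalive` drops the connection when the timer fires at or past the expiry with nothing in flight. A small standalone simulation of that bookkeeping (plain Rust with invented names such as `KeepAlive`; the real code uses `ka_expire`, `ka_timer`, and `ServiceConfig`):

use std::time::{Duration, Instant};

/// Invented stand-in for the dispatcher's keep-alive state (`ka_expire`/`ka_timer`).
struct KeepAlive {
    expire: Instant,
    armed: bool,
}

impl KeepAlive {
    /// Mirrors the constructor change: prefer a handshake (slow-request) deadline,
    /// fall back to the configured keep-alive deadline, otherwise stay unarmed.
    fn new(handshake: Option<Instant>, configured: Option<Instant>, now: Instant) -> Self {
        match handshake.or(configured) {
            Some(deadline) => KeepAlive { expire: deadline, armed: true },
            None => KeepAlive { expire: now, armed: false },
        }
    }

    /// Mirrors the "update keep-alive expire" hunk: each new request pushes the
    /// expiry forward by the configured keep-alive interval.
    fn on_request(&mut self, now: Instant, keep_alive: Duration) {
        if self.armed {
            self.expire = now + keep_alive;
        }
    }

    /// Mirrors `poll_keepalive`: if the timer fires at or past the expiry and
    /// nothing is in flight, the connection is dropped.
    fn should_drop(&self, fired_at: Instant, tasks_outstanding: bool) -> bool {
        self.armed && fired_at >= self.expire && !tasks_outstanding
    }
}

fn main() {
    let now = Instant::now();
    let ka = Duration::from_secs(5);

    // handshake supplied a slow-request deadline 2s from now
    let mut state = KeepAlive::new(Some(now + Duration::from_secs(2)), None, now);

    // no request ever arrives: firing at +2s drops the connection
    assert!(state.should_drop(now + Duration::from_secs(2), false));

    // a request arrives at +1s and extends the deadline to +6s
    state.on_request(now + Duration::from_secs(1), ka);
    assert!(!state.should_drop(now + Duration::from_secs(2), false));
    println!("keep-alive extended after the request");
}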

View File

@@ -197,11 +197,6 @@ where
     }

     /// Disable `HTTP/2` support
-    // #[doc(hidden)]
-    // #[deprecated(
-    //     since = "0.7.4",
-    //     note = "please use acceptor service with proper ServerFlags parama"
-    // )]
     pub fn no_http2(mut self) -> Self {
         self.no_http2 = true;
         self

View File

@@ -12,7 +12,7 @@
 //! to serve incoming HTTP requests.
 //!
 //! As the server uses worker pool, the factory function is restricted to trait bounds
-//! `Sync + Send + 'static` so that each worker would be able to accept Application
+//! `Send + Clone + 'static` so that each worker would be able to accept Application
 //! without a need for synchronization.
 //!
 //! If you wish to share part of state among all workers you should
@@ -29,13 +29,9 @@
 //! Each TLS implementation is provided with [AcceptorService](trait.AcceptorService.html)
 //! that describes how HTTP Server accepts connections.
 //!
-//! For `bind` and `listen` there are corresponding `bind_with` and `listen_with` that accepts
+//! For `bind` and `listen` there are corresponding `bind_ssl|tls|rustls` and `listen_ssl|tls|rustls` that accepts
 //! these services.
 //!
-//! By default, acceptor would work with both HTTP2 and HTTP1 protocols.
-//! But it can be controlled using [ServerFlags](struct.ServerFlags.html) which
-//! can be supplied when creating `AcceptorService`.
-//!
 //! **NOTE:** `native-tls` doesn't support `HTTP2` yet
 //!
 //! ## Signal handling and shutdown
@@ -87,17 +83,13 @@
 //!     // load ssl keys
 //!     let config = load_ssl();
 //!
-//!     // Create acceptor service for only HTTP1 protocol
-//!     // You can use ::new(config) to leave defaults
-//!     let acceptor = server::RustlsAcceptor::with_flags(config, actix_web::server::ServerFlags::HTTP1);
-//!
 //!     // create and start server at once
 //!     server::new(|| {
 //!         App::new()
 //!             // register simple handler, handle all methods
 //!             .resource("/index.html", |r| r.f(index))
 //!     }))
-//!     }).bind_with("127.0.0.1:8080", acceptor)
+//!     }).bind_rustls("127.0.0.1:8443", config)
 //!         .unwrap()
 //!         .start();
 //!
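
The revised module docs describe the worker model behind the new bound: the app factory is cloned into each worker, which builds its own application instance, so `Send + Clone + 'static` removes any need for cross-worker synchronization. A bare-bones illustration of that pattern (plain Rust threads, not the actix-web API; `run_workers` is a made-up helper):

use std::thread;

// Spawn `workers` threads, each with its own clone of the factory and its own app.
fn run_workers<F, A>(factory: F, workers: usize)
where
    F: Fn() -> A + Send + Clone + 'static,
    A: 'static,
{
    let mut handles = Vec::new();
    for _ in 0..workers {
        let factory = factory.clone(); // one clone per worker, no shared locks needed
        handles.push(thread::spawn(move || {
            let _app = factory(); // each worker constructs its own app instance
        }));
    }
    for h in handles {
        h.join().unwrap();
    }
}

fn main() {
    run_workers(|| "application state goes here".to_string(), 4);
}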