use std::time::Duration;
use std::{io, thread};

use actix_rt::{
    time::{sleep, Instant},
    System,
};
use log::{error, info};
use mio::{Interest, Poll, Token as MioToken};
use slab::Slab;

use crate::server::Server;
use crate::socket::{MioListener, SocketAddr};
use crate::waker_queue::{WakerInterest, WakerQueue, WAKER_TOKEN};
use crate::worker::{Conn, WorkerHandle};
use crate::Token;

struct ServerSocketInfo {
    /// Address of socket. Mainly used for logging.
    addr: SocketAddr,

    /// Beware: this is the crate-level `Token` used to identify the socket; it should
    /// not be confused with `mio::Token`.
    token: Token,

    lst: MioListener,

    /// Timeout marks the deadline when this socket's listener should be registered
    /// again after an error.
    timeout: Option<Instant>,
}

/// The accept loop lives with `ServerBuilder`.
///
/// It is responsible for constructing the `Poll` instance and the `WakerQueue`, which
/// are distributed to `Accept` and the `Worker`s.
///
/// It also listens for `ServerCommand`s and pushes interests to the `WakerQueue`.
pub(crate) struct AcceptLoop {
    srv: Option<Server>,
    poll: Option<Poll>,
    waker: WakerQueue,
}
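
// A rough sketch of how `ServerBuilder` is expected to drive this type. This is
// illustrative only; the builder-side variable names below are assumptions, not APIs
// defined in this file:
//
//     let mut accept_loop = AcceptLoop::new(server);
//     let waker = accept_loop.waker_owned();      // cloned into each worker
//     accept_loop.start(sockets, worker_handles); // spawns the accept thread
//     accept_loop.wake(WakerInterest::Pause);     // push command interests later on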

impl AcceptLoop {
    pub fn new(srv: Server) -> Self {
        let poll = Poll::new().unwrap_or_else(|e| panic!("Can not create `mio::Poll`: {}", e));
        let waker = WakerQueue::new(poll.registry())
            .unwrap_or_else(|e| panic!("Can not create `mio::Waker`: {}", e));

        Self {
            srv: Some(srv),
            poll: Some(poll),
            waker,
        }
    }

    pub(crate) fn waker_owned(&self) -> WakerQueue {
        self.waker.clone()
    }

    pub fn wake(&self, i: WakerInterest) {
        self.waker.wake(i);
    }

    pub(crate) fn start(
        &mut self,
        socks: Vec<(Token, MioListener)>,
        handles: Vec<WorkerHandle>,
    ) {
        let srv = self.srv.take().expect("Can not re-use AcceptLoop");
        let poll = self.poll.take().unwrap();
        let waker = self.waker.clone();

        Accept::start(poll, waker, socks, srv, handles);
    }
}

/// Poll instance of the server.
struct Accept {
    poll: Poll,
    waker: WakerQueue,
    handles: Vec<WorkerHandle>,
    srv: Server,
    next: usize,
    backpressure: bool,
}
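
// Note that `Accept` is moved onto a dedicated OS thread (see `Accept::start`) and runs
// a blocking `mio` poll loop there, while workers run on async runtimes. The cloned
// `WakerQueue` is how other threads feed interests back into that loop.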

/// This function defines errors that are per-connection; if we get this error from the
/// `accept()` system call, the next connection might still be ready to be accepted.
///
/// All other errors will incur a timeout before the next `accept()` is performed. The
/// timeout is useful for handling resource exhaustion errors like ENFILE and EMFILE;
/// without it, the loop could spin tightly.
fn connection_error(e: &io::Error) -> bool {
    e.kind() == io::ErrorKind::ConnectionRefused
        || e.kind() == io::ErrorKind::ConnectionAborted
        || e.kind() == io::ErrorKind::ConnectionReset
}
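
// For example, a peer that disconnects between the readiness event and our `accept()`
// call typically surfaces as `ConnectionAborted` or `ConnectionReset`; treating these
// as per-connection errors lets the loop continue with the next queued connection
// instead of backing off for the 500ms timeout used below.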

impl Accept {
    pub(crate) fn start(
        poll: Poll,
        waker: WakerQueue,
        socks: Vec<(Token, MioListener)>,
        srv: Server,
        handles: Vec<WorkerHandle>,
    ) {
        // Accept runs in its own thread and may want to spawn additional futures onto
        // the current actix system.
        let sys = System::current();
        thread::Builder::new()
            .name("actix-server accept loop".to_owned())
            .spawn(move || {
                System::set_current(sys);
                let (mut accept, sockets) =
                    Accept::new_with_sockets(poll, waker, socks, handles, srv);
                accept.poll_with(sockets);
            })
            .unwrap();
    }

    fn new_with_sockets(
        poll: Poll,
        waker: WakerQueue,
        socks: Vec<(Token, MioListener)>,
        handles: Vec<WorkerHandle>,
        srv: Server,
    ) -> (Accept, Slab<ServerSocketInfo>) {
        let mut sockets = Slab::new();
        for (hnd_token, mut lst) in socks.into_iter() {
            let addr = lst.local_addr();

            let entry = sockets.vacant_entry();
            let token = entry.key();

            // Start listening for incoming connections
            poll.registry()
                .register(&mut lst, MioToken(token), Interest::READABLE)
                .unwrap_or_else(|e| panic!("Can not register io: {}", e));

            entry.insert(ServerSocketInfo {
                addr,
                token: hnd_token,
                lst,
                timeout: None,
            });
        }

        let accept = Accept {
            poll,
            waker,
            handles,
            srv,
            next: 0,
            backpressure: false,
        };

        (accept, sockets)
    }
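
    // Note the two token spaces in play above: the `mio::Token` passed to `register` is
    // the slab key of the `ServerSocketInfo`, used to route readiness events back to
    // the right listener, while `ServerSocketInfo::token` is the crate-level `Token`
    // identifying the service a `Conn` belongs to. `WAKER_TOKEN` is a reserved value
    // assumed not to collide with any slab key.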

    fn poll_with(&mut self, mut sockets: Slab<ServerSocketInfo>) {
        let mut events = mio::Events::with_capacity(128);

        loop {
            if let Err(e) = self.poll.poll(&mut events, None) {
                match e.kind() {
                    std::io::ErrorKind::Interrupted => {
                        continue;
                    }
                    _ => {
                        panic!("Poll error: {}", e);
                    }
                }
            }

            for event in events.iter() {
                let token = event.token();
                match token {
                    // This is a loop because the command interest of a previous version
                    // used a loop to drain the command channel. It is not yet known
                    // whether actively draining the waker queue is necessary or good
                    // practice.
                    WAKER_TOKEN => 'waker: loop {
                        // Take the guard on every iteration so no new interest can be
                        // added until the current task is done.
                        let mut guard = self.waker.guard();
                        match guard.pop_front() {
                            // A worker notified that it has become available; we may
                            // want to recover from backpressure.
                            Some(WakerInterest::WorkerAvailable) => {
                                drop(guard);
                                self.maybe_backpressure(&mut sockets, false);
                            }
                            // A new worker thread was spawned; its handle is added to
                            // Accept.
                            Some(WakerInterest::Worker(handle)) => {
                                drop(guard);
                                // We may want to recover from backpressure.
                                self.maybe_backpressure(&mut sockets, false);
                                self.handles.push(handle);
                            }
                            // Got a timer interest; it is time to try registering the
                            // socket(s) again.
                            Some(WakerInterest::Timer) => {
                                drop(guard);
                                self.process_timer(&mut sockets)
                            }
                            Some(WakerInterest::Pause) => {
                                drop(guard);
                                self.deregister_all(&mut sockets);
                            }
                            Some(WakerInterest::Resume) => {
                                drop(guard);
                                sockets.iter_mut().for_each(|(token, info)| {
                                    self.register_logged(token, info);
                                });
                            }
                            Some(WakerInterest::Stop) => {
                                return self.deregister_all(&mut sockets);
                            }
                            // The waker queue is drained.
                            None => {
                                // Reset the WakerQueue before breaking so it does not
                                // grow indefinitely.
                                WakerQueue::reset(&mut guard);
                                break 'waker;
                            }
                        }
                    },
                    _ => {
                        let token = usize::from(token);
                        self.accept(&mut sockets, token);
                    }
                }
            }
        }
    }
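
    // In miniature, how another thread gets this loop's attention (a sketch; `waker`
    // here is a `WakerQueue` clone such as the one obtained via
    // `AcceptLoop::waker_owned`):
    //
    //     waker.wake(WakerInterest::WorkerAvailable);
    //
    // This pushes the interest onto the shared queue and fires the underlying
    // `mio::Waker`, so `self.poll.poll(..)` returns an event for `WAKER_TOKEN` and the
    // 'waker loop above drains the queue.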

    fn process_timer(&self, sockets: &mut Slab<ServerSocketInfo>) {
        let now = Instant::now();
        sockets
            .iter_mut()
            // Only sockets that had an associated timeout were deregistered.
            .filter(|(_, info)| info.timeout.is_some())
            .for_each(|(token, info)| {
                let inst = info.timeout.take().unwrap();

                if now < inst {
                    info.timeout = Some(inst);
                } else if !self.backpressure {
                    self.register_logged(token, info);
                }

                // Drop the timeout if the server is in backpressure and the socket's
                // timeout has expired. When the server recovers from backpressure it
                // will register all sockets without a timeout value, so this socket's
                // registration is delayed until then.
            });
    }
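
    // Worked timing, using the constants from `accept` below: an accept error at t=0
    // sets `timeout = t + 500ms` and schedules a `WakerInterest::Timer` for roughly
    // t + 510ms. When that interest reaches `process_timer`, `now >= inst` holds, so
    // the listener is registered again; unless the server is in backpressure, in which
    // case the timeout is dropped and registration waits for backpressure recovery.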

    #[cfg(not(target_os = "windows"))]
    fn register(&self, token: usize, info: &mut ServerSocketInfo) -> io::Result<()> {
        self.poll
            .registry()
            .register(&mut info.lst, MioToken(token), Interest::READABLE)
    }

    #[cfg(target_os = "windows")]
    fn register(&self, token: usize, info: &mut ServerSocketInfo) -> io::Result<()> {
        // On Windows, calling register without first deregistering causes an error.
        // See https://github.com/actix/actix-web/issues/905
        // Calling reregister seems to fix the issue.
        self.poll
            .registry()
            .register(&mut info.lst, mio::Token(token), Interest::READABLE)
            .or_else(|_| {
                self.poll.registry().reregister(
                    &mut info.lst,
                    mio::Token(token),
                    Interest::READABLE,
                )
            })
    }

    fn register_logged(&self, token: usize, info: &mut ServerSocketInfo) {
        match self.register(token, info) {
            Ok(_) => info!("Resume accepting connections on {}", info.addr),
            Err(e) => error!("Can not register server socket {}", e),
        }
    }

    fn deregister(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
        self.poll.registry().deregister(&mut info.lst)
    }

    fn deregister_logged(&self, info: &mut ServerSocketInfo) {
        match self.deregister(info) {
            Ok(_) => info!("Paused accepting connections on {}", info.addr),
            Err(e) => error!("Can not deregister server socket {}", e),
        }
    }

    fn deregister_all(&self, sockets: &mut Slab<ServerSocketInfo>) {
        // This is a best-effort implementation with the following limitation:
        //
        // Every ServerSocketInfo with an associated timeout will be skipped, and its
        // timeout is removed in the process.
        //
        // Therefore, a WakerInterest::Pause followed by a WakerInterest::Resume within
        // a very short gap (less than 500ms) would cause all timing-out
        // ServerSocketInfos to be re-registered before the expected time.
        sockets
            .iter_mut()
            // Take all timeouts. This prevents the Accept::process_timer method from
            // re-registering a socket afterwards.
            .map(|(_, info)| (info.timeout.take(), info))
            // A socket info that had a timeout is already deregistered, so skip it.
            .filter(|(timeout, _)| timeout.is_none())
            .for_each(|(_, info)| self.deregister_logged(info));
    }

    fn maybe_backpressure(&mut self, sockets: &mut Slab<ServerSocketInfo>, on: bool) {
        // Only operate when the server's backpressure state differs from the given
        // flag.
        if self.backpressure != on {
            self.backpressure = on;
            sockets
                .iter_mut()
                // Only operate on sockets without an associated timeout. Sockets with
                // one are handled by the `accept` and `process_timer` methods; they are
                // either already deregistered or will be re-registered in the future.
                .filter(|(_, info)| info.timeout.is_none())
                .for_each(|(token, info)| {
                    if on {
                        self.deregister_logged(info);
                    } else {
                        self.register_logged(token, info);
                    }
                });
        }
    }
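
    // Design note: entering backpressure deregisters the listeners instead of dropping
    // connections, so pending connections accumulate in the OS accept backlog until a
    // worker becomes available again. Recovery is triggered by the `WorkerAvailable`
    // and `Worker` interests handled in `poll_with`.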

    fn accept_one(&mut self, sockets: &mut Slab<ServerSocketInfo>, mut conn: Conn) {
        if self.backpressure {
            // `send_connection` removes faulted workers from handles. The worst case
            // here is that the conn gets dropped after all handles are gone.
            while !self.handles.is_empty() {
                match self.send_connection(sockets, conn) {
                    Ok(_) => return,
                    Err(c) => conn = c,
                }
            }
        } else {
            // Do one round, trying to send the conn to a worker until one succeeds,
            // starting from self.next.
            let mut idx = 0;
            while idx < self.handles.len() {
                idx += 1;
                if self.handles[self.next].available() {
                    match self.send_connection(sockets, conn) {
                        Ok(_) => return,
                        Err(c) => conn = c,
                    }
                } else {
                    self.set_next();
                }
            }
            // Sending the conn failed because all workers are either faulted or
            // unavailable. Enter the backpressure state and try again.
            self.maybe_backpressure(sockets, true);
            self.accept_one(sockets, conn);
        }
    }

    // Advance to the next worker handle that would accept work, wrapping around.
    fn set_next(&mut self) {
        self.next = (self.next + 1) % self.handles.len();
    }

    // Send a connection to a worker and handle errors.
    fn send_connection(
        &mut self,
        sockets: &mut Slab<ServerSocketInfo>,
        conn: Conn,
    ) -> Result<(), Conn> {
        match self.handles[self.next].send(conn) {
            Ok(_) => {
                self.set_next();
                Ok(())
            }
            Err(conn) => {
                // The worker lost contact and could be gone. A message is sent to the
                // `ServerBuilder` future to notify it that a new worker should be made.
                // After that, remove the faulted worker and enter backpressure if
                // necessary.
                self.srv.worker_faulted(self.handles[self.next].idx);
                self.handles.swap_remove(self.next);
                if self.handles.is_empty() {
                    error!("No workers");
                    self.maybe_backpressure(sockets, true);
                    // All workers are gone and the conn has nowhere to be sent.
                    // Treat this situation as Ok and drop the conn.
                    return Ok(());
                } else if self.handles.len() <= self.next {
                    self.next = 0;
                }
                Err(conn)
            }
        }
    }

    fn accept(&mut self, sockets: &mut Slab<ServerSocketInfo>, token: usize) {
        loop {
            let info = sockets
                .get_mut(token)
                .expect("ServerSocketInfo is removed from Slab");

            match info.lst.accept() {
                Ok(io) => {
                    let msg = Conn {
                        io,
                        token: info.token,
                    };
                    self.accept_one(sockets, msg);
                }
                Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return,
                Err(ref e) if connection_error(e) => continue,
                Err(e) => {
                    error!("Error accepting connection: {}", e);

                    // Deregister the listener temporarily.
                    self.deregister_logged(info);

                    // Sleep after the error; write the timeout to the socket info, as
                    // the poll loop will later need it to determine which socket's
                    // listener should be registered again, and when.
                    info.timeout = Some(Instant::now() + Duration::from_millis(500));

                    // After the sleep, a Timer interest is sent to the Accept poll.
                    let waker = self.waker.clone();
                    System::current().arbiter().spawn(async move {
                        sleep(Duration::from_millis(510)).await;
                        waker.wake(WakerInterest::Timer);
                    });

                    return;
                }
            };
        }
    }
}