mirror of https://github.com/fafhrd91/actix-net synced 2025-02-25 15:02:50 +01:00

update actix-server and actix-testing to tokio 1.0 (#239)

fakeshadow 2020-12-29 07:44:53 +08:00 committed by GitHub
parent a09f9abfcb
commit b7202db8fd
15 changed files with 1008 additions and 841 deletions


@@ -3,6 +3,12 @@
 ## Unreleased - 2020-xx-xx
 * Added explicit info log message on accept queue pause. [#215]
 * Prevent double registration of sockets when back-pressure is resolved. [#223]
+* Update `mio` dependency to `0.7.3`.
+* Remove `socket2` dependency.
+* `ServerBuilder::backlog` now accepts `u32` instead of `i32`.
+* Remove `AcceptNotify` type and pass `WakerQueue` to `Worker` so it can wake up `Accept`'s `Poll`.
+* Convert `mio::net::TcpStream` to `actix_rt::net::TcpStream` (`UnixStream` for UDS) using `FromRawFd` and `IntoRawFd` (`FromRawSocket` and `IntoRawSocket` on Windows).
+* Remove the `AsyncRead` and `AsyncWrite` trait bounds from the `socket::FromStream` trait.
 [#215]: https://github.com/actix/actix-net/pull/215
 [#223]: https://github.com/actix/actix-net/pull/223
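
The raw-fd conversion in the changelog entry above can be shown in isolation. The following is a minimal sketch rather than the commit's own code (the commit implements it behind `socket::FromStream`): on Unix, the mio stream gives up its file descriptor, which is rewrapped as a std stream and handed to the tokio reactor. It assumes the caller is inside a tokio 1.x runtime; mio sockets are already non-blocking, as `from_std` expects.

use std::os::unix::io::{FromRawFd, IntoRawFd};

fn into_tokio_stream(mio_stream: mio::net::TcpStream) -> std::io::Result<tokio::net::TcpStream> {
    // SAFETY: `into_raw_fd` transfers ownership of the descriptor,
    // so it has exactly one owner at any point in time.
    let std_stream = unsafe { std::net::TcpStream::from_raw_fd(mio_stream.into_raw_fd()) };
    // Register the (already non-blocking) socket with the tokio reactor.
    tokio::net::TcpStream::from_std(std_stream)
}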


@@ -20,25 +20,21 @@ path = "src/lib.rs"
 default = []
 [dependencies]
-actix-service = "1.0.6"
+actix-codec = "0.4.0-beta.1"
-actix-rt = "1.1.1"
+actix-rt = "2.0.0-beta.1"
-actix-codec = "0.3.0"
+actix-service = "2.0.0-beta.1"
-actix-utils = "2.0.0"
+actix-utils = "3.0.0-beta.1"
+futures-core = { version = "0.3.7", default-features = false }
 log = "0.4"
+mio = { version = "0.7.6", features = ["os-poll", "net"] }
 num_cpus = "1.13"
-mio = "0.6.19"
-socket2 = "0.3"
-futures-channel = { version = "0.3.4", default-features = false }
-futures-util = { version = "0.3.4", default-features = false, features = ["sink"] }
 slab = "0.4"
+tokio = { version = "1", features = ["sync"] }
-# unix domain sockets
-# FIXME: Remove it and use mio own uds feature once mio 0.7 is released
-mio-uds = { version = "0.6.7" }
 [dev-dependencies]
-bytes = "0.5"
-env_logger = "0.7"
 actix-testing = "1.0.0"
-tokio = { version = "0.2", features = ["io-util"] }
+bytes = "1"
+env_logger = "0.7"
+futures-util = { version = "0.3.7", default-features = false, features = ["sink"] }
+tokio = { version = "1", features = ["io-util"] }


@@ -1,120 +1,86 @@
-use std::sync::mpsc as sync_mpsc;
 use std::time::Duration;
 use std::{io, thread};
-use actix_rt::time::{delay_until, Instant};
+use actix_rt::time::{sleep_until, Instant};
 use actix_rt::System;
 use log::{error, info};
+use mio::{Interest, Poll, Token as MioToken};
 use slab::Slab;
 use crate::server::Server;
-use crate::socket::{SocketAddr, SocketListener, StdListener};
+use crate::socket::{MioListener, SocketAddr};
-use crate::worker::{Conn, WorkerClient};
+use crate::waker_queue::{WakerInterest, WakerQueue, WAKER_TOKEN};
+use crate::worker::{Conn, WorkerHandle};
 use crate::Token;
-pub(crate) enum Command {
-    Pause,
-    Resume,
-    Stop,
-    Worker(WorkerClient),
-}
 struct ServerSocketInfo {
+    // addr for socket. mainly used for logging.
     addr: SocketAddr,
+    // be ware this is the crate token for identify socket and should not be confused with
+    // mio::Token
     token: Token,
-    sock: SocketListener,
+    lst: MioListener,
+    // timeout is used to mark the deadline when this socket's listener should be registered again
+    // after an error.
     timeout: Option<Instant>,
 }
-#[derive(Clone)]
+/// Accept loop would live with `ServerBuilder`.
-pub(crate) struct AcceptNotify(mio::SetReadiness);
+///
+/// It's tasked with construct `Poll` instance and `WakerQueue` which would be distributed to
-impl AcceptNotify {
+/// `Accept` and `Worker`.
-    pub(crate) fn new(ready: mio::SetReadiness) -> Self {
+///
-        AcceptNotify(ready)
+/// It would also listen to `ServerCommand` and push interests to `WakerQueue`.
-    }
-    pub(crate) fn notify(&self) {
-        let _ = self.0.set_readiness(mio::Ready::readable());
-    }
-}
-impl Default for AcceptNotify {
-    fn default() -> Self {
-        AcceptNotify::new(mio::Registration::new2().1)
-    }
-}
 pub(crate) struct AcceptLoop {
-    cmd_reg: Option<mio::Registration>,
-    cmd_ready: mio::SetReadiness,
-    notify_reg: Option<mio::Registration>,
-    notify_ready: mio::SetReadiness,
-    tx: sync_mpsc::Sender<Command>,
-    rx: Option<sync_mpsc::Receiver<Command>>,
     srv: Option<Server>,
+    poll: Option<Poll>,
+    waker: WakerQueue,
 }
 impl AcceptLoop {
-    pub fn new(srv: Server) -> AcceptLoop {
+    pub fn new(srv: Server) -> Self {
-        let (tx, rx) = sync_mpsc::channel();
+        let poll = Poll::new().unwrap_or_else(|e| panic!("Can not create `mio::Poll`: {}", e));
-        let (cmd_reg, cmd_ready) = mio::Registration::new2();
+        let waker = WakerQueue::new(poll.registry())
-        let (notify_reg, notify_ready) = mio::Registration::new2();
+            .unwrap_or_else(|e| panic!("Can not create `mio::Waker`: {}", e));
-        AcceptLoop {
+        Self {
-            tx,
-            cmd_ready,
-            cmd_reg: Some(cmd_reg),
-            notify_ready,
-            notify_reg: Some(notify_reg),
-            rx: Some(rx),
             srv: Some(srv),
+            poll: Some(poll),
+            waker,
         }
     }
-    pub fn send(&self, msg: Command) {
+    pub(crate) fn waker_owned(&self) -> WakerQueue {
-        let _ = self.tx.send(msg);
+        self.waker.clone()
-        let _ = self.cmd_ready.set_readiness(mio::Ready::readable());
     }
-    pub fn get_notify(&self) -> AcceptNotify {
+    pub fn wake(&self, i: WakerInterest) {
-        AcceptNotify::new(self.notify_ready.clone())
+        self.waker.wake(i);
     }
     pub(crate) fn start(
         &mut self,
-        socks: Vec<(Token, StdListener)>,
+        socks: Vec<(Token, MioListener)>,
-        workers: Vec<WorkerClient>,
+        handles: Vec<WorkerHandle>,
     ) {
         let srv = self.srv.take().expect("Can not re-use AcceptInfo");
+        let poll = self.poll.take().unwrap();
+        let waker = self.waker.clone();
-        Accept::start(
+        Accept::start(poll, waker, socks, srv, handles);
-            self.rx.take().expect("Can not re-use AcceptInfo"),
-            self.cmd_reg.take().expect("Can not re-use AcceptInfo"),
-            self.notify_reg.take().expect("Can not re-use AcceptInfo"),
-            socks,
-            srv,
-            workers,
-        );
     }
 }
+/// poll instance of the server.
 struct Accept {
-    poll: mio::Poll,
+    poll: Poll,
-    rx: sync_mpsc::Receiver<Command>,
+    waker: WakerQueue,
-    sockets: Slab<ServerSocketInfo>,
+    handles: Vec<WorkerHandle>,
-    workers: Vec<WorkerClient>,
     srv: Server,
-    timer: (mio::Registration, mio::SetReadiness),
     next: usize,
     backpressure: bool,
 }
-const DELTA: usize = 100;
-const CMD: mio::Token = mio::Token(0);
-const TIMER: mio::Token = mio::Token(1);
-const NOTIFY: mio::Token = mio::Token(2);
 /// This function defines errors that are per-connection. Which basically
 /// means that if we get this error from `accept()` system call it means
 /// next connection might be ready to be accepted.
@@ -129,326 +95,290 @@ fn connection_error(e: &io::Error) -> bool {
 }
 impl Accept {
-    #![allow(clippy::too_many_arguments)]
     pub(crate) fn start(
-        rx: sync_mpsc::Receiver<Command>,
+        poll: Poll,
-        cmd_reg: mio::Registration,
+        waker: WakerQueue,
-        notify_reg: mio::Registration,
+        socks: Vec<(Token, MioListener)>,
-        socks: Vec<(Token, StdListener)>,
         srv: Server,
-        workers: Vec<WorkerClient>,
+        handles: Vec<WorkerHandle>,
     ) {
+        // Accept runs in its own thread and would want to spawn additional futures to current
+        // actix system.
         let sys = System::current();
-        thread::Builder::new()
+        // start accept thread
+        let _ = thread::Builder::new()
             .name("actix-server accept loop".to_owned())
             .spawn(move || {
                 System::set_current(sys);
-                let mut accept = Accept::new(rx, socks, workers, srv);
+                let (mut accept, sockets) =
+                    Accept::new_with_sockets(poll, waker, socks, handles, srv);
-                // Start listening for incoming commands
+                accept.poll_with(sockets);
-                if let Err(err) = accept.poll.register(
+            })
-                    &cmd_reg,
+            .unwrap();
-                    CMD,
-                    mio::Ready::readable(),
-                    mio::PollOpt::edge(),
-                ) {
-                    panic!("Can not register Registration: {}", err);
-                }
-                // Start listening for notify updates
-                if let Err(err) = accept.poll.register(
-                    &notify_reg,
-                    NOTIFY,
-                    mio::Ready::readable(),
-                    mio::PollOpt::edge(),
-                ) {
-                    panic!("Can not register Registration: {}", err);
-                }
-                accept.poll();
-            });
     }
-    fn new(
+    fn new_with_sockets(
-        rx: sync_mpsc::Receiver<Command>,
+        poll: Poll,
-        socks: Vec<(Token, StdListener)>,
+        waker: WakerQueue,
-        workers: Vec<WorkerClient>,
+        socks: Vec<(Token, MioListener)>,
+        handles: Vec<WorkerHandle>,
         srv: Server,
-    ) -> Accept {
+    ) -> (Accept, Slab<ServerSocketInfo>) {
-        // Create a poll instance
-        let poll = match mio::Poll::new() {
-            Ok(poll) => poll,
-            Err(err) => panic!("Can not create mio::Poll: {}", err),
-        };
-        // Start accept
         let mut sockets = Slab::new();
-        for (hnd_token, lst) in socks.into_iter() {
+        for (hnd_token, mut lst) in socks.into_iter() {
             let addr = lst.local_addr();
-            let server = lst.into_listener();
             let entry = sockets.vacant_entry();
             let token = entry.key();
             // Start listening for incoming connections
-            if let Err(err) = poll.register(
+            poll.registry()
-                &server,
+                .register(&mut lst, MioToken(token), Interest::READABLE)
-                mio::Token(token + DELTA),
+                .unwrap_or_else(|e| panic!("Can not register io: {}", e));
-                mio::Ready::readable(),
-                mio::PollOpt::edge(),
-            ) {
-                panic!("Can not register io: {}", err);
-            }
             entry.insert(ServerSocketInfo {
                 addr,
                 token: hnd_token,
-                sock: server,
+                lst,
                 timeout: None,
             });
         }
-        // Timer
+        let accept = Accept {
-        let (tm, tmr) = mio::Registration::new2();
-        if let Err(err) =
-            poll.register(&tm, TIMER, mio::Ready::readable(), mio::PollOpt::edge())
-        {
-            panic!("Can not register Registration: {}", err);
-        }
-        Accept {
             poll,
-            rx,
+            waker,
-            sockets,
+            handles,
-            workers,
             srv,
             next: 0,
-            timer: (tm, tmr),
             backpressure: false,
-        }
+        };
+        (accept, sockets)
     }
-    fn poll(&mut self) {
+    fn poll_with(&mut self, mut sockets: Slab<ServerSocketInfo>) {
-        // Create storage for events
         let mut events = mio::Events::with_capacity(128);
         loop {
-            if let Err(err) = self.poll.poll(&mut events, None) {
+            self.poll
-                panic!("Poll error: {}", err);
+                .poll(&mut events, None)
-            }
+                .unwrap_or_else(|e| panic!("Poll error: {}", e));
             for event in events.iter() {
                 let token = event.token();
                 match token {
-                    CMD => {
+                    // This is a loop because interests for command from previous version was
-                        if !self.process_cmd() {
+                    // a loop that would try to drain the command channel. It's yet unknown
-                            return;
+                    // if it's necessary/good practice to actively drain the waker queue.
-                        }
-                    }
+                    WAKER_TOKEN => 'waker: loop {
+                        // take guard with every iteration so no new interest can be added
+                        // until the current task is done.
+                        let mut guard = self.waker.guard();
+                        match guard.pop_front() {
+                            // worker notify it becomes available. we may want to recover
+                            // from backpressure.
+                            Some(WakerInterest::WorkerAvailable) => {
+                                drop(guard);
+                                self.maybe_backpressure(&mut sockets, false);
+                            }
+                            // a new worker thread is made and it's handle would be added
+                            // to Accept
+                            Some(WakerInterest::Worker(handle)) => {
+                                drop(guard);
+                                // maybe we want to recover from a backpressure.
+                                self.maybe_backpressure(&mut sockets, false);
+                                self.handles.push(handle);
+                            }
+                            // got timer interest and it's time to try register socket(s)
+                            // again.
+                            Some(WakerInterest::Timer) => {
+                                drop(guard);
+                                self.process_timer(&mut sockets)
+                            }
+                            Some(WakerInterest::Pause) => {
+                                drop(guard);
+                                sockets.iter_mut().for_each(|(_, info)| {
+                                    match self.deregister(info) {
+                                        Ok(_) => info!(
+                                            "Paused accepting connections on {}",
+                                            info.addr
+                                        ),
+                                        Err(e) => {
+                                            error!("Can not deregister server socket {}", e)
+                                        }
+                                    }
+                                });
+                            }
+                            Some(WakerInterest::Resume) => {
+                                drop(guard);
+                                sockets.iter_mut().for_each(|(token, info)| {
+                                    self.register_logged(token, info);
+                                });
+                            }
+                            Some(WakerInterest::Stop) => {
+                                return self.deregister_all(&mut sockets);
+                            }
+                            // waker queue is drained.
+                            None => {
+                                // Reset the WakerQueue before break so it does not grow
+                                // infinitely.
+                                WakerQueue::reset(&mut guard);
+                                break 'waker;
+                            }
+                        }
+                    },
-                    TIMER => self.process_timer(),
-                    NOTIFY => self.backpressure(false),
                     _ => {
                         let token = usize::from(token);
-                        if token < DELTA {
-                            continue;
-                        }
-                        self.accept(token - DELTA);
+                        self.accept(&mut sockets, token);
                     }
                 }
             }
         }
     }
-    fn process_timer(&mut self) {
+    fn process_timer(&self, sockets: &mut Slab<ServerSocketInfo>) {
         let now = Instant::now();
-        for (token, info) in self.sockets.iter_mut() {
+        sockets.iter_mut().for_each(|(token, info)| {
+            // only the ServerSocketInfo have an associate timeout value was de registered.
             if let Some(inst) = info.timeout.take() {
                 if now > inst {
-                    if let Err(err) = self.poll.register(
-                        &info.sock,
-                        mio::Token(token + DELTA),
-                        mio::Ready::readable(),
-                        mio::PollOpt::edge(),
-                    ) {
-                        error!("Can not register server socket {}", err);
-                    } else {
-                        info!("Resume accepting connections on {}", info.addr);
-                    }
+                    self.register_logged(token, info);
                 } else {
                     info.timeout = Some(inst);
                 }
             }
-        }
+        });
     }
-    fn process_cmd(&mut self) -> bool {
-        loop {
-            match self.rx.try_recv() {
-                Ok(cmd) => match cmd {
-                    Command::Pause => {
-                        for (_, info) in self.sockets.iter_mut() {
-                            if let Err(err) = self.poll.deregister(&info.sock) {
-                                error!("Can not deregister server socket {}", err);
-                            } else {
-                                info!("Paused accepting connections on {}", info.addr);
-                            }
-                        }
-                    }
-                    Command::Resume => {
-                        for (token, info) in self.sockets.iter() {
-                            if let Err(err) = self.register(token, info) {
-                                error!("Can not resume socket accept process: {}", err);
-                            } else {
-                                info!(
-                                    "Accepting connections on {} has been resumed",
-                                    info.addr
-                                );
-                            }
-                        }
-                    }
-                    Command::Stop => {
-                        for (_, info) in self.sockets.iter() {
-                            let _ = self.poll.deregister(&info.sock);
-                        }
-                        return false;
-                    }
-                    Command::Worker(worker) => {
-                        self.backpressure(false);
-                        self.workers.push(worker);
-                    }
-                },
-                Err(err) => match err {
-                    sync_mpsc::TryRecvError::Empty => break,
-                    sync_mpsc::TryRecvError::Disconnected => {
-                        for (_, info) in self.sockets.iter() {
-                            let _ = self.poll.deregister(&info.sock);
-                        }
-                        return false;
-                    }
-                },
-            }
-        }
-        true
-    }
     #[cfg(not(target_os = "windows"))]
-    fn register(&self, token: usize, info: &ServerSocketInfo) -> io::Result<()> {
+    fn register(&self, token: usize, info: &mut ServerSocketInfo) -> io::Result<()> {
-        self.poll.register(
+        self.poll
-            &info.sock,
+            .registry()
-            mio::Token(token + DELTA),
+            .register(&mut info.lst, MioToken(token), Interest::READABLE)
-            mio::Ready::readable(),
-            mio::PollOpt::edge(),
-        )
     }
     #[cfg(target_os = "windows")]
-    fn register(&self, token: usize, info: &ServerSocketInfo) -> io::Result<()> {
+    fn register(&self, token: usize, info: &mut ServerSocketInfo) -> io::Result<()> {
         // On windows, calling register without deregister cause an error.
         // See https://github.com/actix/actix-web/issues/905
         // Calling reregister seems to fix the issue.
         self.poll
-            .register(
+            .registry()
-                &info.sock,
+            .register(&mut info.lst, mio::Token(token), Interest::READABLE)
-                mio::Token(token + DELTA),
-                mio::Ready::readable(),
-                mio::PollOpt::edge(),
-            )
             .or_else(|_| {
-                self.poll.reregister(
+                self.poll.registry().reregister(
-                    &info.sock,
+                    &mut info.lst,
-                    mio::Token(token + DELTA),
+                    mio::Token(token),
-                    mio::Ready::readable(),
+                    Interest::READABLE,
-                    mio::PollOpt::edge(),
                 )
             })
     }
-    fn backpressure(&mut self, on: bool) {
+    fn register_logged(&self, token: usize, info: &mut ServerSocketInfo) {
+        match self.register(token, info) {
+            Ok(_) => info!("Resume accepting connections on {}", info.addr),
+            Err(e) => error!("Can not register server socket {}", e),
+        }
+    }
+    fn deregister(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
+        self.poll.registry().deregister(&mut info.lst)
+    }
+    fn deregister_all(&self, sockets: &mut Slab<ServerSocketInfo>) {
+        sockets.iter_mut().for_each(|(_, info)| {
+            info!("Accepting connections on {} has been paused", info.addr);
+            let _ = self.deregister(info);
+        });
+    }
+    fn maybe_backpressure(&mut self, sockets: &mut Slab<ServerSocketInfo>, on: bool) {
         if self.backpressure {
             if !on {
                 self.backpressure = false;
-                for (token, info) in self.sockets.iter() {
+                for (token, info) in sockets.iter_mut() {
                     if info.timeout.is_some() {
                         // socket will attempt to re-register itself when its timeout completes
                        continue;
                    }
+                    self.register_logged(token, info);
-                    if let Err(err) = self.register(token, info) {
-                        error!("Can not resume socket accept process: {}", err);
-                    } else {
-                        info!("Accepting connections on {} has been resumed", info.addr);
-                    }
                }
            }
        } else if on {
            self.backpressure = true;
-            for (_, info) in self.sockets.iter() {
+            self.deregister_all(sockets);
-                let _ = self.poll.deregister(&info.sock);
-                info!("Accepting connections on {} has been paused", info.addr);
-            }
        }
    }
-    fn accept_one(&mut self, mut msg: Conn) {
+    fn accept_one(&mut self, sockets: &mut Slab<ServerSocketInfo>, mut msg: Conn) {
         if self.backpressure {
-            while !self.workers.is_empty() {
+            while !self.handles.is_empty() {
-                match self.workers[self.next].send(msg) {
+                match self.handles[self.next].send(msg) {
-                    Ok(_) => (),
+                    Ok(_) => {
+                        self.set_next();
+                        break;
+                    }
                     Err(tmp) => {
+                        // worker lost contact and could be gone. a message is sent to
+                        // `ServerBuilder` future to notify it a new worker should be made.
+                        // after that remove the fault worker.
-                        self.srv.worker_faulted(self.workers[self.next].idx);
+                        self.srv.worker_faulted(self.handles[self.next].idx);
                         msg = tmp;
-                        self.workers.swap_remove(self.next);
+                        self.handles.swap_remove(self.next);
-                        if self.workers.is_empty() {
+                        if self.handles.is_empty() {
                             error!("No workers");
                             return;
-                        } else if self.workers.len() <= self.next {
+                        } else if self.handles.len() <= self.next {
                             self.next = 0;
                        }
                        continue;
                    }
                }
-                self.next = (self.next + 1) % self.workers.len();
-                break;
            }
        } else {
            let mut idx = 0;
-            while idx < self.workers.len() {
+            while idx < self.handles.len() {
                 idx += 1;
-                if self.workers[self.next].available() {
+                if self.handles[self.next].available() {
-                    match self.workers[self.next].send(msg) {
+                    match self.handles[self.next].send(msg) {
                         Ok(_) => {
-                            self.next = (self.next + 1) % self.workers.len();
+                            self.set_next();
                             return;
                        }
+                        // worker lost contact and could be gone. a message is sent to
+                        // `ServerBuilder` future to notify it a new worker should be made.
+                        // after that remove the fault worker and enter backpressure if necessary.
                         Err(tmp) => {
-                            self.srv.worker_faulted(self.workers[self.next].idx);
+                            self.srv.worker_faulted(self.handles[self.next].idx);
                             msg = tmp;
-                            self.workers.swap_remove(self.next);
+                            self.handles.swap_remove(self.next);
-                            if self.workers.is_empty() {
+                            if self.handles.is_empty() {
                                 error!("No workers");
-                                self.backpressure(true);
+                                self.maybe_backpressure(sockets, true);
                                 return;
-                            } else if self.workers.len() <= self.next {
+                            } else if self.handles.len() <= self.next {
                                 self.next = 0;
                            }
                            continue;
                        }
                    }
                }
-                self.next = (self.next + 1) % self.workers.len();
+                self.set_next();
            }
            // enable backpressure
-            self.backpressure(true);
+            self.maybe_backpressure(sockets, true);
-            self.accept_one(msg);
+            self.accept_one(sockets, msg);
        }
    }
-    fn accept(&mut self, token: usize) {
+    // set next worker handle that would accept work.
+    fn set_next(&mut self) {
+        self.next = (self.next + 1) % self.handles.len();
+    }
+    fn accept(&mut self, sockets: &mut Slab<ServerSocketInfo>, token: usize) {
         loop {
-            let msg = if let Some(info) = self.sockets.get_mut(token) {
+            let msg = if let Some(info) = sockets.get_mut(token) {
-                match info.sock.accept() {
+                match info.lst.accept() {
                     Ok(Some((io, addr))) => Conn {
                         io,
                         token: info.token,
@@ -458,18 +388,22 @@ impl Accept {
                     Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return,
                     Err(ref e) if connection_error(e) => continue,
                     Err(e) => {
+                        // deregister listener temporary
                         error!("Error accepting connection: {}", e);
-                        if let Err(err) = self.poll.deregister(&info.sock) {
+                        if let Err(err) = self.deregister(info) {
                             error!("Can not deregister server socket {}", err);
                        }
-                        // sleep after error
+                        // sleep after error. write the timeout to socket info as later the poll
+                        // would need it mark which socket and when it's listener should be
+                        // registered.
                         info.timeout = Some(Instant::now() + Duration::from_millis(500));
-                        let r = self.timer.1.clone();
+                        // after the sleep a Timer interest is sent to Accept Poll
+                        let waker = self.waker.clone();
                         System::current().arbiter().send(Box::pin(async move {
-                            delay_until(Instant::now() + Duration::from_millis(510)).await;
+                            sleep_until(Instant::now() + Duration::from_millis(510)).await;
-                            let _ = r.set_readiness(mio::Ready::readable());
+                            waker.wake(WakerInterest::Timer);
                        }));
                        return;
                    }
@@ -478,7 +412,7 @@ impl Accept {
                 return;
             };
-            self.accept_one(msg);
+            self.accept_one(sockets, msg);
        }
    }
 }
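
The new `waker_queue` module that `Accept` relies on above is not part of this excerpt. Going only by how it is used here (`WakerQueue::new(registry)`, `wake`, `guard`, `reset`, `WAKER_TOKEN`), a minimal version could look like the sketch below; the token value, locking strategy, and the exact variant set are assumptions, not the commit's actual implementation.

use std::collections::VecDeque;
use std::sync::{Arc, Mutex, MutexGuard};

use mio::{Registry, Token, Waker};

// Reserved token for the waker so it can never collide with a socket slab
// index (assumption; any value outside the slab's range would do).
pub(crate) const WAKER_TOKEN: Token = Token(usize::MAX);

// Variants mirror the interests matched in `poll_with` above; the real type
// also carries a `Worker(WorkerHandle)` variant, omitted here to keep the
// sketch self-contained.
pub(crate) enum WakerInterest {
    WorkerAvailable,
    Pause,
    Resume,
    Stop,
    Timer,
}

// A `mio::Waker` plus a queue of pending interests, shared between the
// accept thread and the worker/builder side.
#[derive(Clone)]
pub(crate) struct WakerQueue(Arc<(Waker, Mutex<VecDeque<WakerInterest>>)>);

impl WakerQueue {
    pub(crate) fn new(registry: &Registry) -> std::io::Result<Self> {
        let waker = Waker::new(registry, WAKER_TOKEN)?;
        Ok(Self(Arc::new((waker, Mutex::new(VecDeque::new())))))
    }

    // Push an interest, then wake the `Poll` the registry belongs to.
    pub(crate) fn wake(&self, interest: WakerInterest) {
        let (waker, queue) = &*self.0;
        queue.lock().expect("poisoned waker queue").push_back(interest);
        let _ = waker.wake();
    }

    pub(crate) fn guard(&self) -> MutexGuard<'_, VecDeque<WakerInterest>> {
        let (_, queue) = &*self.0;
        queue.lock().expect("poisoned waker queue")
    }

    // Drop the queue's spare capacity once it has been drained.
    pub(crate) fn reset(guard: &mut MutexGuard<'_, VecDeque<WakerInterest>>) {
        guard.shrink_to_fit();
    }
}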


@@ -1,36 +1,35 @@
+use std::future::Future;
 use std::pin::Pin;
 use std::task::{Context, Poll};
 use std::time::Duration;
-use std::{io, mem, net};
+use std::{io, mem};
 use actix_rt::net::TcpStream;
-use actix_rt::time::{delay_until, Instant};
+use actix_rt::time::{sleep_until, Instant};
 use actix_rt::{spawn, System};
-use futures_channel::mpsc::{unbounded, UnboundedReceiver};
-use futures_channel::oneshot;
-use futures_util::future::ready;
-use futures_util::stream::FuturesUnordered;
-use futures_util::{future::Future, ready, stream::Stream, FutureExt, StreamExt};
 use log::{error, info};
-use socket2::{Domain, Protocol, Socket, Type};
+use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver};
+use tokio::sync::oneshot;
-use crate::accept::{AcceptLoop, AcceptNotify, Command};
+use crate::accept::AcceptLoop;
 use crate::config::{ConfiguredService, ServiceConfig};
 use crate::server::{Server, ServerCommand};
 use crate::service::{InternalServiceFactory, ServiceFactory, StreamNewService};
 use crate::signals::{Signal, Signals};
-use crate::socket::StdListener;
+use crate::socket::{MioListener, StdSocketAddr, StdTcpListener, ToSocketAddrs};
-use crate::worker::{self, Worker, WorkerAvailability, WorkerClient};
+use crate::socket::{MioTcpListener, MioTcpSocket};
-use crate::Token;
+use crate::waker_queue::{WakerInterest, WakerQueue};
+use crate::worker::{self, Worker, WorkerAvailability, WorkerHandle};
+use crate::{join_all, Token};
 /// Server builder
 pub struct ServerBuilder {
     threads: usize,
     token: Token,
-    backlog: i32,
+    backlog: u32,
-    workers: Vec<(usize, WorkerClient)>,
+    handles: Vec<(usize, WorkerHandle)>,
     services: Vec<Box<dyn InternalServiceFactory>>,
-    sockets: Vec<(Token, String, StdListener)>,
+    sockets: Vec<(Token, String, MioListener)>,
     accept: AcceptLoop,
     exit: bool,
     shutdown_timeout: Duration,
@@ -49,13 +48,13 @@ impl Default for ServerBuilder {
 impl ServerBuilder {
     /// Create new Server builder instance
     pub fn new() -> ServerBuilder {
-        let (tx, rx) = unbounded();
+        let (tx, rx) = unbounded_channel();
         let server = Server::new(tx);
         ServerBuilder {
             threads: num_cpus::get(),
-            token: Token(0),
+            token: Token::default(),
-            workers: Vec::new(),
+            handles: Vec::new(),
             services: Vec::new(),
             sockets: Vec::new(),
             accept: AcceptLoop::new(server.clone()),
@@ -89,7 +88,7 @@ impl ServerBuilder {
     /// Generally set in the 64-2048 range. Default value is 2048.
     ///
     /// This method should be called before `bind()` method call.
-    pub fn backlog(mut self, num: i32) -> Self {
+    pub fn backlog(mut self, num: u32) -> Self {
         self.backlog = num;
         self
     }
@@ -147,7 +146,7 @@ impl ServerBuilder {
         for (name, lst) in cfg.services {
             let token = self.token.next();
             srv.stream(token, name.clone(), lst.local_addr()?);
-            self.sockets.push((token, name, StdListener::Tcp(lst)));
+            self.sockets.push((token, name, MioListener::Tcp(lst)));
         }
         self.services.push(Box::new(srv));
     }
@@ -160,7 +159,7 @@ impl ServerBuilder {
     pub fn bind<F, U, N: AsRef<str>>(mut self, name: N, addr: U, factory: F) -> io::Result<Self>
     where
         F: ServiceFactory<TcpStream>,
-        U: net::ToSocketAddrs,
+        U: ToSocketAddrs,
     {
         let sockets = bind_addr(addr, self.backlog)?;
@@ -173,12 +172,12 @@ impl ServerBuilder {
                 lst.local_addr()?,
             ));
             self.sockets
-                .push((token, name.as_ref().to_string(), StdListener::Tcp(lst)));
+                .push((token, name.as_ref().to_string(), MioListener::Tcp(lst)));
         }
         Ok(self)
     }
-    #[cfg(all(unix))]
+    #[cfg(unix)]
     /// Add new unix domain service to the server.
     pub fn bind_uds<F, U, N>(self, name: N, addr: U, factory: F) -> io::Result<Self>
     where
@@ -186,8 +185,6 @@ impl ServerBuilder {
         N: AsRef<str>,
         U: AsRef<std::path::Path>,
     {
-        use std::os::unix::net::UnixListener;
         // The path must not exist when we try to bind.
         // Try to remove it to avoid bind error.
         if let Err(e) = std::fs::remove_file(addr.as_ref()) {
@@ -197,26 +194,27 @@ impl ServerBuilder {
             }
         }
-        let lst = UnixListener::bind(addr)?;
+        let lst = crate::socket::StdUnixListener::bind(addr)?;
         self.listen_uds(name, lst, factory)
     }
-    #[cfg(all(unix))]
+    #[cfg(unix)]
     /// Add new unix domain service to the server.
     /// Useful when running as a systemd service and
     /// a socket FD can be acquired using the systemd crate.
     pub fn listen_uds<F, N: AsRef<str>>(
         mut self,
         name: N,
-        lst: std::os::unix::net::UnixListener,
+        lst: crate::socket::StdUnixListener,
         factory: F,
     ) -> io::Result<Self>
     where
         F: ServiceFactory<actix_rt::net::UnixStream>,
     {
-        use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+        use std::net::{IpAddr, Ipv4Addr};
+        lst.set_nonblocking(true)?;
         let token = self.token.next();
-        let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
+        let addr = StdSocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
         self.services.push(StreamNewService::create(
             name.as_ref().to_string(),
             token,
@@ -224,7 +222,7 @@ impl ServerBuilder {
             addr,
         ));
         self.sockets
-            .push((token, name.as_ref().to_string(), StdListener::Uds(lst)));
+            .push((token, name.as_ref().to_string(), MioListener::from(lst)));
         Ok(self)
     }
@@ -232,21 +230,25 @@ impl ServerBuilder {
     pub fn listen<F, N: AsRef<str>>(
         mut self,
         name: N,
-        lst: net::TcpListener,
+        lst: StdTcpListener,
         factory: F,
     ) -> io::Result<Self>
     where
         F: ServiceFactory<TcpStream>,
     {
+        lst.set_nonblocking(true)?;
+        let addr = lst.local_addr()?;
         let token = self.token.next();
         self.services.push(StreamNewService::create(
             name.as_ref().to_string(),
             token,
             factory,
-            lst.local_addr()?,
+            addr,
         ));
         self.sockets
-            .push((token, name.as_ref().to_string(), StdListener::Tcp(lst)));
+            .push((token, name.as_ref().to_string(), MioListener::from(lst)));
         Ok(self)
     }
@@ -263,12 +265,12 @@ impl ServerBuilder {
         info!("Starting {} workers", self.threads);
         // start workers
-        let workers = (0..self.threads)
+        let handles = (0..self.threads)
             .map(|idx| {
-                let worker = self.start_worker(idx, self.accept.get_notify());
+                let handle = self.start_worker(idx, self.accept.waker_owned());
-                self.workers.push((idx, worker.clone()));
+                self.handles.push((idx, handle.clone()));
-                worker
+                handle
             })
             .collect();
@@ -281,7 +283,7 @@ impl ServerBuilder {
                 .into_iter()
                 .map(|t| (t.0, t.2))
                 .collect(),
-            workers,
+            handles,
         );
         // handle signals
@@ -296,10 +298,9 @@ impl ServerBuilder {
         }
     }
-    fn start_worker(&self, idx: usize, notify: AcceptNotify) -> WorkerClient {
+    fn start_worker(&self, idx: usize, waker: WakerQueue) -> WorkerHandle {
-        let avail = WorkerAvailability::new(notify);
+        let avail = WorkerAvailability::new(waker);
-        let services: Vec<Box<dyn InternalServiceFactory>> =
+        let services = self.services.iter().map(|v| v.clone_factory()).collect();
-            self.services.iter().map(|v| v.clone_factory()).collect();
         Worker::start(idx, services, avail, self.shutdown_timeout)
     }
@@ -307,11 +308,11 @@ impl ServerBuilder {
     fn handle_cmd(&mut self, item: ServerCommand) {
         match item {
             ServerCommand::Pause(tx) => {
-                self.accept.send(Command::Pause);
+                self.accept.wake(WakerInterest::Pause);
                 let _ = tx.send(());
             }
             ServerCommand::Resume(tx) => {
-                self.accept.send(Command::Resume);
+                self.accept.wake(WakerInterest::Resume);
                 let _ = tx.send(());
             }
             ServerCommand::Signal(sig) => {
@@ -355,50 +356,41 @@ impl ServerBuilder {
                 let exit = self.exit;
                 // stop accept thread
-                self.accept.send(Command::Stop);
+                self.accept.wake(WakerInterest::Stop);
                 let notify = std::mem::take(&mut self.notify);
                 // stop workers
-                if !self.workers.is_empty() && graceful {
+                if !self.handles.is_empty() && graceful {
-                    spawn(
+                    let iter = self
-                        self.workers
+                        .handles
                         .iter()
                         .map(move |worker| worker.1.stop(graceful))
-                            .collect::<FuturesUnordered<_>>()
+                        .collect();
-                            .collect::<Vec<_>>()
-                            .then(move |_| {
+                    let fut = join_all(iter);
-                                if let Some(tx) = completion {
-                                    let _ = tx.send(());
+                    spawn(async move {
-                                }
+                        let _ = fut.await;
-                                for tx in notify {
+                        if let Some(tx) = completion {
-                                    let _ = tx.send(());
+                            let _ = tx.send(());
-                                }
+                        }
-                                if exit {
+                        for tx in notify {
-                                    spawn(
+                            let _ = tx.send(());
-                                        async {
+                        }
-                                            delay_until(
+                        if exit {
-                                                Instant::now() + Duration::from_millis(300),
+                            spawn(async {
-                                            )
+                                sleep_until(Instant::now() + Duration::from_millis(300)).await;
-                                            .await;
+                                System::current().stop();
-                                            System::current().stop();
+                            });
-                                        }
+                        }
-                                        .boxed(),
+                    })
-                                    );
-                                }
-                                ready(())
-                            }),
-                    )
                 } else {
                     // we need to stop system if server was spawned
                     if self.exit {
-                        spawn(
+                        spawn(async {
-                            delay_until(Instant::now() + Duration::from_millis(300)).then(
+                            sleep_until(Instant::now() + Duration::from_millis(300)).await;
-                                |_| {
+                            System::current().stop();
-                                    System::current().stop();
+                        });
-                                    ready(())
-                                },
-                            ),
-                        );
                     }
                     if let Some(tx) = completion {
                         let _ = tx.send(());
@@ -410,9 +402,9 @@ impl ServerBuilder {
             }
             ServerCommand::WorkerFaulted(idx) => {
                 let mut found = false;
-                for i in 0..self.workers.len() {
+                for i in 0..self.handles.len() {
-                    if self.workers[i].0 == idx {
+                    if self.handles[i].0 == idx {
-                        self.workers.swap_remove(i);
+                        self.handles.swap_remove(i);
                         found = true;
                         break;
                     }
@@ -421,10 +413,10 @@ impl ServerBuilder {
                 if found {
                     error!("Worker has died {:?}, restarting", idx);
-                    let mut new_idx = self.workers.len();
+                    let mut new_idx = self.handles.len();
                     'found: loop {
-                        for i in 0..self.workers.len() {
+                        for i in 0..self.handles.len() {
-                            if self.workers[i].0 == new_idx {
+                            if self.handles[i].0 == new_idx {
                                 new_idx += 1;
                                 continue 'found;
                             }
@@ -432,9 +424,9 @@ impl ServerBuilder {
                         break;
                     }
-                    let worker = self.start_worker(new_idx, self.accept.get_notify());
+                    let handle = self.start_worker(new_idx, self.accept.waker_owned());
-                    self.workers.push((new_idx, worker.clone()));
+                    self.handles.push((new_idx, handle.clone()));
-                    self.accept.send(Command::Worker(worker));
+                    self.accept.wake(WakerInterest::Worker(handle));
                 }
             }
         }
@@ -446,20 +438,18 @@ impl Future for ServerBuilder {
     fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
         loop {
-            match ready!(Pin::new(&mut self.cmd).poll_next(cx)) {
+            match Pin::new(&mut self.cmd).poll_recv(cx) {
-                Some(it) => self.as_mut().get_mut().handle_cmd(it),
+                Poll::Ready(Some(it)) => self.as_mut().get_mut().handle_cmd(it),
-                None => {
+                _ => return Poll::Pending,
-                    return Poll::Pending;
-                }
             }
         }
     }
 }
-pub(super) fn bind_addr<S: net::ToSocketAddrs>(
+pub(super) fn bind_addr<S: ToSocketAddrs>(
     addr: S,
-    backlog: i32,
+    backlog: u32,
-) -> io::Result<Vec<net::TcpListener>> {
+) -> io::Result<Vec<MioTcpListener>> {
     let mut err = None;
     let mut succ = false;
     let mut sockets = Vec::new();
@@ -487,14 +477,13 @@ pub(super) fn bind_addr<S: net::ToSocketAddrs>(
     }
 }
-fn create_tcp_listener(addr: net::SocketAddr, backlog: i32) -> io::Result<net::TcpListener> {
+fn create_tcp_listener(addr: StdSocketAddr, backlog: u32) -> io::Result<MioTcpListener> {
-    let domain = match addr {
+    let socket = match addr {
-        net::SocketAddr::V4(_) => Domain::ipv4(),
+        StdSocketAddr::V4(_) => MioTcpSocket::new_v4()?,
-        net::SocketAddr::V6(_) => Domain::ipv6(),
+        StdSocketAddr::V6(_) => MioTcpSocket::new_v6()?,
     };
-    let socket = Socket::new(domain, Type::stream(), Some(Protocol::tcp()))?;
-    socket.set_reuse_address(true)?;
+    socket.set_reuseaddr(true)?;
-    socket.bind(&addr.into())?;
+    socket.bind(addr)?;
-    socket.listen(backlog)?;
+    socket.listen(backlog)
-    Ok(socket.into_tcp_listener())
 }
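
For reference, the new `create_tcp_listener` path can be exercised on its own with just mio 0.7. This is a standalone sketch, using arbitrary example values for the address and backlog:

use std::net::SocketAddr;

use mio::net::{TcpListener, TcpSocket};

fn listen_example() -> std::io::Result<TcpListener> {
    let addr: SocketAddr = "127.0.0.1:8080".parse().unwrap();
    // TcpSocket is what lets this commit drop the socket2 dependency: it
    // exposes the same pre-bind knobs (reuseaddr, backlog) directly.
    let socket = TcpSocket::new_v4()?;
    socket.set_reuseaddr(true)?;
    socket.bind(addr)?;
    // `listen` consumes the socket and returns a non-blocking listener;
    // the backlog is a `u32` here, which is why the builder API changed.
    socket.listen(1024)
}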


@@ -1,5 +1,6 @@
 use std::collections::HashMap;
-use std::{fmt, io, net};
+use std::future::Future;
+use std::{fmt, io};
 use actix_rt::net::TcpStream;
 use actix_service::{
@@ -7,23 +8,23 @@ use actix_service::{
     ServiceFactory as BaseServiceFactory,
 };
 use actix_utils::counter::CounterGuard;
-use futures_util::future::{ok, Future, FutureExt, LocalBoxFuture};
+use futures_core::future::LocalBoxFuture;
 use log::error;
-use super::builder::bind_addr;
+use crate::builder::bind_addr;
-use super::service::{BoxedServerService, InternalServiceFactory, StreamService};
+use crate::service::{BoxedServerService, InternalServiceFactory, StreamService};
-use super::Token;
+use crate::socket::{MioStream, MioTcpListener, StdSocketAddr, StdTcpListener, ToSocketAddrs};
-use crate::socket::StdStream;
+use crate::{ready, Token};
 pub struct ServiceConfig {
-    pub(crate) services: Vec<(String, net::TcpListener)>,
+    pub(crate) services: Vec<(String, MioTcpListener)>,
     pub(crate) apply: Option<Box<dyn ServiceRuntimeConfiguration>>,
     pub(crate) threads: usize,
-    pub(crate) backlog: i32,
+    pub(crate) backlog: u32,
 }
 impl ServiceConfig {
-    pub(super) fn new(threads: usize, backlog: i32) -> ServiceConfig {
+    pub(super) fn new(threads: usize, backlog: u32) -> ServiceConfig {
         ServiceConfig {
             threads,
             backlog,
@@ -43,24 +44,20 @@ impl ServiceConfig {
     /// Add new service to server
     pub fn bind<U, N: AsRef<str>>(&mut self, name: N, addr: U) -> io::Result<&mut Self>
     where
-        U: net::ToSocketAddrs,
+        U: ToSocketAddrs,
     {
         let sockets = bind_addr(addr, self.backlog)?;
         for lst in sockets {
-            self.listen(name.as_ref(), lst);
+            self._listen(name.as_ref(), lst);
         }
         Ok(self)
     }
     /// Add new service to server
-    pub fn listen<N: AsRef<str>>(&mut self, name: N, lst: net::TcpListener) -> &mut Self {
+    pub fn listen<N: AsRef<str>>(&mut self, name: N, lst: StdTcpListener) -> &mut Self {
-        if self.apply.is_none() {
+        self._listen(name, MioTcpListener::from_std(lst))
-            self.apply = Some(Box::new(not_configured));
-        }
-        self.services.push((name.as_ref().to_string(), lst));
-        self
     }
     /// Register service configuration function. This function get called
@@ -72,11 +69,19 @@ impl ServiceConfig {
         self.apply = Some(Box::new(f));
         Ok(())
     }
+    fn _listen<N: AsRef<str>>(&mut self, name: N, lst: MioTcpListener) -> &mut Self {
+        if self.apply.is_none() {
+            self.apply = Some(Box::new(not_configured));
+        }
+        self.services.push((name.as_ref().to_string(), lst));
+        self
+    }
 }
 pub(super) struct ConfiguredService {
     rt: Box<dyn ServiceRuntimeConfiguration>,
-    names: HashMap<Token, (String, net::SocketAddr)>,
+    names: HashMap<Token, (String, StdSocketAddr)>,
     topics: HashMap<String, Token>,
     services: Vec<Token>,
 }
@@ -91,7 +96,7 @@ impl ConfiguredService {
         }
     }
-    pub(super) fn stream(&mut self, token: Token, name: String, addr: net::SocketAddr) {
+    pub(super) fn stream(&mut self, token: Token, name: String, addr: StdSocketAddr) {
         self.names.insert(token, (name.clone(), addr));
         self.topics.insert(name, token);
         self.services.push(token);
@@ -121,7 +126,7 @@ impl InternalServiceFactory for ConfiguredService {
         let tokens = self.services.clone();
         // construct services
-        async move {
+        Box::pin(async move {
             let mut services = rt.services;
             // TODO: Proper error handling here
             for f in rt.onstart.into_iter() {
@@ -146,14 +151,13 @@ impl InternalServiceFactory for ConfiguredService {
                         token,
                         Box::new(StreamService::new(fn_service(move |_: TcpStream| {
                             error!("Service {:?} is not configured", name);
-                            ok::<_, ()>(())
+                            ready::<Result<_, ()>>(Ok(()))
                         }))),
                     ));
                 };
             }
             Ok(res)
-        }
+        })
-        .boxed_local()
     }
 }
@@ -233,13 +237,13 @@ impl ServiceRuntime {
     where
         F: Future<Output = ()> + 'static,
     {
-        self.onstart.push(fut.boxed_local())
+        self.onstart.push(Box::pin(fut))
     }
 }
 type BoxedNewService = Box<
     dyn BaseServiceFactory<
-        (Option<CounterGuard>, StdStream),
+        (Option<CounterGuard>, MioStream),
         Response = (),
         Error = (),
         InitError = (),
@@ -253,7 +257,7 @@ struct ServiceFactory<T> {
     inner: T,
 }
-impl<T> BaseServiceFactory<(Option<CounterGuard>, StdStream)> for ServiceFactory<T>
+impl<T> BaseServiceFactory<(Option<CounterGuard>, MioStream)> for ServiceFactory<T>
 where
     T: BaseServiceFactory<TcpStream, Config = ()>,
     T::Future: 'static,
@@ -270,7 +274,7 @@ where
     fn new_service(&self, _: ()) -> Self::Future {
         let fut = self.inner.new_service(());
-        async move {
+        Box::pin(async move {
             match fut.await {
                 Ok(s) => Ok(Box::new(StreamService::new(s)) as BoxedServerService),
                 Err(e) => {
@@ -278,7 +282,6 @@ where
                     Err(())
                 }
            }
-        }
+        })
-        .boxed_local()
    }
 }


@@ -11,6 +11,7 @@ mod server;
 mod service;
 mod signals;
 mod socket;
+mod waker_queue;
 mod worker;
 pub use self::builder::ServerBuilder;
@@ -21,11 +22,25 @@ pub use self::service::ServiceFactory;
 #[doc(hidden)]
 pub use self::socket::FromStream;
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
 /// Socket ID token
 #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
 pub(crate) struct Token(usize);
+impl Default for Token {
+    fn default() -> Self {
+        Self::new()
+    }
+}
 impl Token {
+    fn new() -> Self {
+        Self(0)
+    }
     pub(crate) fn next(&mut self) -> Token {
         let token = Token(self.0);
         self.0 += 1;
@@ -37,3 +52,90 @@ impl Token {
 pub fn new() -> ServerBuilder {
     ServerBuilder::default()
 }
+// temporary Ready type for std::future::{ready, Ready}; Can be removed when MSRV surpass 1.48
+#[doc(hidden)]
+pub struct Ready<T>(Option<T>);
+pub(crate) fn ready<T>(t: T) -> Ready<T> {
+    Ready(Some(t))
+}
+impl<T> Unpin for Ready<T> {}
+impl<T> Future for Ready<T> {
+    type Output = T;
+    fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
+        Poll::Ready(self.get_mut().0.take().unwrap())
+    }
+}
+// a poor man's join future. joined future is only used when starting/stopping the server.
+// pin_project and pinned futures are overkill for this task.
+pub(crate) struct JoinAll<T> {
+    fut: Vec<JoinFuture<T>>,
+}
+pub(crate) fn join_all<T>(fut: Vec<impl Future<Output = T> + 'static>) -> JoinAll<T> {
+    let fut = fut
+        .into_iter()
+        .map(|f| JoinFuture::Future(Box::pin(f)))
+        .collect();
+    JoinAll { fut }
+}
+enum JoinFuture<T> {
+    Future(Pin<Box<dyn Future<Output = T>>>),
+    Result(Option<T>),
+}
+impl<T> Unpin for JoinAll<T> {}
+impl<T> Future for JoinAll<T> {
+    type Output = Vec<T>;
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        let mut ready = true;
+        let this = self.get_mut();
+        for fut in this.fut.iter_mut() {
+            if let JoinFuture::Future(f) = fut {
+                match f.as_mut().poll(cx) {
+                    Poll::Ready(t) => {
+                        *fut = JoinFuture::Result(Some(t));
+                    }
+                    Poll::Pending => ready = false,
+                }
+            }
+        }
+        if ready {
+            let mut res = Vec::new();
+            for fut in this.fut.iter_mut() {
+                if let JoinFuture::Result(f) = fut {
+                    res.push(f.take().unwrap());
+                }
+            }
+            Poll::Ready(res)
+        } else {
+            Poll::Pending
+        }
+    }
+}
+#[cfg(test)]
+mod test {
+    use super::*;
+    #[actix_rt::test]
+    async fn test_join_all() {
+        let futs = vec![ready(Ok(1)), ready(Err(3)), ready(Ok(9))];
+        let mut res = join_all(futs).await.into_iter();
+        assert_eq!(Ok(1), res.next().unwrap());
+        assert_eq!(Err(3), res.next().unwrap());
+        assert_eq!(Ok(9), res.next().unwrap());
+    }
+}


@@ -3,9 +3,8 @@ use std::io;
 use std::pin::Pin;
 use std::task::{Context, Poll};
-use futures_channel::mpsc::UnboundedSender;
+use tokio::sync::mpsc::UnboundedSender;
-use futures_channel::oneshot;
+use tokio::sync::oneshot;
-use futures_util::FutureExt;
 use crate::builder::ServerBuilder;
 use crate::signals::Signal;
@@ -42,11 +41,11 @@ impl Server {
     }
     pub(crate) fn signal(&self, sig: Signal) {
-        let _ = self.0.unbounded_send(ServerCommand::Signal(sig));
+        let _ = self.0.send(ServerCommand::Signal(sig));
     }
     pub(crate) fn worker_faulted(&self, idx: usize) {
-        let _ = self.0.unbounded_send(ServerCommand::WorkerFaulted(idx));
+        let _ = self.0.send(ServerCommand::WorkerFaulted(idx));
     }
     /// Pause accepting incoming connections
@@ -55,15 +54,19 @@ impl Server {
     /// All opened connection remains active.
     pub fn pause(&self) -> impl Future<Output = ()> {
         let (tx, rx) = oneshot::channel();
-        let _ = self.0.unbounded_send(ServerCommand::Pause(tx));
+        let _ = self.0.send(ServerCommand::Pause(tx));
-        rx.map(|_| ())
+        async {
+            let _ = rx.await;
+        }
     }
     /// Resume accepting incoming connections
     pub fn resume(&self) -> impl Future<Output = ()> {
         let (tx, rx) = oneshot::channel();
-        let _ = self.0.unbounded_send(ServerCommand::Resume(tx));
+        let _ = self.0.send(ServerCommand::Resume(tx));
-        rx.map(|_| ())
+        async {
+            let _ = rx.await;
+        }
     }
     /// Stop incoming connection processing, stop all workers and exit.
@@ -71,11 +74,13 @@ impl Server {
     /// If server starts with `spawn()` method, then spawned thread get terminated.
     pub fn stop(&self, graceful: bool) -> impl Future<Output = ()> {
         let (tx, rx) = oneshot::channel();
-        let _ = self.0.unbounded_send(ServerCommand::Stop {
+        let _ = self.0.send(ServerCommand::Stop {
             graceful,
             completion: Some(tx),
         });
-        rx.map(|_| ())
+        async {
+            let _ = rx.await;
+        }
     }
 }
@@ -93,7 +98,7 @@ impl Future for Server {
         if this.1.is_none() {
             let (tx, rx) = oneshot::channel();
-            if this.0.unbounded_send(ServerCommand::Notify(tx)).is_err() {
+            if this.0.send(ServerCommand::Notify(tx)).is_err() {
                 return Poll::Ready(Ok(()));
             }
             this.1 = Some(rx);
@@ -101,8 +106,7 @@ impl Future for Server {
         match Pin::new(this.1.as_mut().unwrap()).poll(cx) {
             Poll::Pending => Poll::Pending,
-            Poll::Ready(Ok(_)) => Poll::Ready(Ok(())),
+            Poll::Ready(_) => Poll::Ready(Ok(())),
-            Poll::Ready(Err(_)) => Poll::Ready(Ok(())),
         }
     }
 }
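
All three of `pause()`, `resume()`, and `stop()` above follow the same command-plus-acknowledgement pattern on tokio 1.0 channels. A self-contained sketch of just that pattern (the names here are illustrative, not actix-server's API):

use std::future::Future;

use tokio::sync::{mpsc, oneshot};

enum Command {
    Pause(oneshot::Sender<()>),
}

fn pause(tx: &mpsc::UnboundedSender<Command>) -> impl Future<Output = ()> {
    let (ack_tx, ack_rx) = oneshot::channel();
    // Hand the ack sender to the server loop; it fires (or drops) the
    // sender once the command has been handled.
    let _ = tx.send(Command::Pause(ack_tx));
    async move {
        // Resolves on ack or on drop; either way the command is done.
        let _ = ack_rx.await;
    }
}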


@@ -2,15 +2,13 @@ use std::marker::PhantomData;
 use std::net::SocketAddr;
 use std::task::{Context, Poll};
-use actix_rt::spawn;
 use actix_service::{Service, ServiceFactory as BaseServiceFactory};
 use actix_utils::counter::CounterGuard;
-use futures_util::future::{err, ok, LocalBoxFuture, Ready};
+use futures_core::future::LocalBoxFuture;
-use futures_util::{FutureExt, TryFutureExt};
 use log::error;
-use super::Token;
+use crate::socket::{FromStream, MioStream};
-use crate::socket::{FromStream, StdStream};
+use crate::{ready, Ready, Token};
 pub trait ServiceFactory<Stream: FromStream>: Send + Clone + 'static {
     type Factory: BaseServiceFactory<Stream, Config = ()>;
@@ -28,7 +26,7 @@ pub(crate) trait InternalServiceFactory: Send {
 pub(crate) type BoxedServerService = Box<
     dyn Service<
-        (Option<CounterGuard>, StdStream),
+        (Option<CounterGuard>, MioStream),
         Response = (),
         Error = (),
         Future = Ready<Result<(), ()>>,
@@ -49,7 +47,7 @@ impl<S, I> StreamService<S, I> {
     }
 }
-impl<S, I> Service<(Option<CounterGuard>, StdStream)> for StreamService<S, I>
+impl<S, I> Service<(Option<CounterGuard>, MioStream)> for StreamService<S, I>
 where
     S: Service<I>,
     S::Future: 'static,
@@ -64,21 +62,21 @@ where
         self.service.poll_ready(ctx).map_err(|_| ())
     }
-    fn call(&mut self, (guard, req): (Option<CounterGuard>, StdStream)) -> Self::Future {
+    fn call(&mut self, (guard, req): (Option<CounterGuard>, MioStream)) -> Self::Future {
-        match FromStream::from_stdstream(req) {
+        ready(match FromStream::from_mio(req) {
             Ok(stream) => {
                 let f = self.service.call(stream);
-                spawn(async move {
+                actix_rt::spawn(async move {
                     let _ = f.await;
                     drop(guard);
                 });
-                ok(())
+                Ok(())
             }
             Err(e) => {
                 error!("Can not convert to an async tcp stream: {}", e);
-                err(())
+                Err(())
             }
-        }
+        })
     }
 }
@@ -132,15 +130,16 @@ where
     fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>> {
         let token = self.token;
-        self.inner
+        let fut = self.inner.create().new_service(());
-            .create()
+        Box::pin(async move {
-            .new_service(())
+            match fut.await {
-            .map_err(|_| ())
+                Ok(inner) => {
-            .map_ok(move |inner| {
+                    let service = Box::new(StreamService::new(inner)) as _;
-                let service: BoxedServerService = Box::new(StreamService::new(inner));
+                    Ok(vec![(token, service)])
-                vec![(token, service)]
+                }
-            })
+                Err(_) => Err(()),
-            .boxed_local()
+            }
+        })
     }
 }


@@ -2,7 +2,7 @@ use std::future::Future;
 use std::pin::Pin;
 use std::task::{Context, Poll};
-use futures_util::future::lazy;
+use futures_core::future::LocalBoxFuture;
 use crate::server::Server;
@@ -23,48 +23,51 @@ pub(crate) enum Signal {
 pub(crate) struct Signals {
     srv: Server,
     #[cfg(not(unix))]
-    stream: Pin<Box<dyn Future<Output = std::io::Result<()>>>>,
+    signals: LocalBoxFuture<'static, std::io::Result<()>>,
     #[cfg(unix)]
-    streams: Vec<(Signal, actix_rt::signal::unix::Signal)>,
+    signals: Vec<(Signal, LocalBoxFuture<'static, ()>)>,
 }
 impl Signals {
     pub(crate) fn start(srv: Server) {
-        actix_rt::spawn(lazy(|_| {
+        #[cfg(not(unix))]
-            #[cfg(not(unix))]
+        {
-            {
+            actix_rt::spawn(Signals {
-                actix_rt::spawn(Signals {
+                srv,
-                    srv,
+                signals: Box::pin(actix_rt::signal::ctrl_c()),
-                    stream: Box::pin(actix_rt::signal::ctrl_c()),
+            });
-                });
+        }
-            }
+        #[cfg(unix)]
-            #[cfg(unix)]
+        {
-            {
+            use actix_rt::signal::unix;
-                use actix_rt::signal::unix;
-                let mut streams = Vec::new();
+            let sig_map = [
+                (unix::SignalKind::interrupt(), Signal::Int),
+                (unix::SignalKind::hangup(), Signal::Hup),
+                (unix::SignalKind::terminate(), Signal::Term),
+                (unix::SignalKind::quit(), Signal::Quit),
+            ];
-                let sig_map = [
+            let mut signals = Vec::new();
-                    (unix::SignalKind::interrupt(), Signal::Int),
-                    (unix::SignalKind::hangup(), Signal::Hup),
-                    (unix::SignalKind::terminate(), Signal::Term),
-                    (unix::SignalKind::quit(), Signal::Quit),
-                ];
             for (kind, sig) in sig_map.iter() {
                 match unix::signal(*kind) {
-                        Ok(stream) => streams.push((*sig, stream)),
+                    Ok(mut stream) => {
-                        Err(e) => log::error!(
+                        let fut = Box::pin(async move {
-                            "Can not initialize stream handler for {:?} err: {}",
+                            let _ = stream.recv().await;
-                            sig,
+                        }) as _;
-                            e
+                        signals.push((*sig, fut));
-                        ),
+                    }
-                    }
+                    Err(e) => log::error!(
+                        "Can not initialize stream handler for {:?} err: {}",
+                        sig,
+                        e
+                    ),
+                }
-                actix_rt::spawn(Signals { srv, streams })
+            }
             }
-        }));
+            actix_rt::spawn(Signals { srv, signals });
+        }
     }
 }
@@ -73,25 +76,20 @@ impl Future for Signals {
     fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
         #[cfg(not(unix))]
-        match Pin::new(&mut self.stream).poll(cx) {
+        match self.signals.as_mut().poll(cx) {
             Poll::Ready(_) => {
                 self.srv.signal(Signal::Int);
                 Poll::Ready(())
             }
-            Poll::Pending => return Poll::Pending,
+            Poll::Pending => Poll::Pending,
         }
         #[cfg(unix)]
         {
-            for idx in 0..self.streams.len() {
+            for (sig, fut) in self.signals.iter_mut() {
-                loop {
+                if fut.as_mut().poll(cx).is_ready() {
-                    match self.streams[idx].1.poll_recv(cx) {
+                    let sig = *sig;
-                        Poll::Ready(None) => return Poll::Ready(()),
+                    self.srv.signal(sig);
-                        Poll::Pending => break,
+                    return Poll::Ready(());
-                        Poll::Ready(Some(_)) => {
-                            let sig = self.streams[idx].0;
-                            self.srv.signal(sig);
-                        }
-                    }
                }
            }
            Poll::Pending


@@ -1,135 +1,91 @@
-use std::{fmt, io, net};
+pub(crate) use std::net::{
+    SocketAddr as StdSocketAddr, TcpListener as StdTcpListener, ToSocketAddrs,
+};
+
+pub(crate) use mio::net::{TcpListener as MioTcpListener, TcpSocket as MioTcpSocket};
+#[cfg(unix)]
+pub(crate) use {
+    mio::net::UnixListener as MioUnixListener,
+    std::os::unix::net::UnixListener as StdUnixListener,
+};
+
+use std::{fmt, io};

-use actix_codec::{AsyncRead, AsyncWrite};
 use actix_rt::net::TcpStream;
+use mio::event::Source;
+use mio::net::TcpStream as MioTcpStream;
+use mio::{Interest, Registry, Token};

-pub(crate) enum StdListener {
-    Tcp(net::TcpListener),
-    #[cfg(all(unix))]
-    Uds(std::os::unix::net::UnixListener),
-}
-
-pub(crate) enum SocketAddr {
-    Tcp(net::SocketAddr),
-    #[cfg(all(unix))]
-    Uds(std::os::unix::net::SocketAddr),
-}
-
-impl fmt::Display for SocketAddr {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match *self {
-            SocketAddr::Tcp(ref addr) => write!(f, "{}", addr),
-            #[cfg(all(unix))]
-            SocketAddr::Uds(ref addr) => write!(f, "{:?}", addr),
-        }
-    }
-}
-
-impl fmt::Debug for SocketAddr {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match *self {
-            SocketAddr::Tcp(ref addr) => write!(f, "{:?}", addr),
-            #[cfg(all(unix))]
-            SocketAddr::Uds(ref addr) => write!(f, "{:?}", addr),
-        }
-    }
-}
-
-impl fmt::Display for StdListener {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match *self {
-            StdListener::Tcp(ref lst) => write!(f, "{}", lst.local_addr().ok().unwrap()),
-            #[cfg(all(unix))]
-            StdListener::Uds(ref lst) => write!(f, "{:?}", lst.local_addr().ok().unwrap()),
-        }
-    }
-}
-
-impl StdListener {
+#[cfg(windows)]
+use std::os::windows::io::{FromRawSocket, IntoRawSocket};
+#[cfg(unix)]
+use {
+    actix_rt::net::UnixStream,
+    mio::net::{SocketAddr as MioSocketAddr, UnixStream as MioUnixStream},
+    std::os::unix::io::{FromRawFd, IntoRawFd},
+};
+
+pub(crate) enum MioListener {
+    Tcp(MioTcpListener),
+    #[cfg(unix)]
+    Uds(MioUnixListener),
+}
+
+impl MioListener {
     pub(crate) fn local_addr(&self) -> SocketAddr {
-        match self {
-            StdListener::Tcp(lst) => SocketAddr::Tcp(lst.local_addr().unwrap()),
-            #[cfg(all(unix))]
-            StdListener::Uds(lst) => SocketAddr::Uds(lst.local_addr().unwrap()),
-        }
-    }
-
-    pub(crate) fn into_listener(self) -> SocketListener {
-        match self {
-            StdListener::Tcp(lst) => SocketListener::Tcp(
-                mio::net::TcpListener::from_std(lst)
-                    .expect("Can not create mio::net::TcpListener"),
-            ),
-            #[cfg(all(unix))]
-            StdListener::Uds(lst) => SocketListener::Uds(
-                mio_uds::UnixListener::from_listener(lst)
-                    .expect("Can not create mio_uds::UnixListener"),
-            ),
-        }
-    }
-}
-
-#[derive(Debug)]
-pub enum StdStream {
-    Tcp(std::net::TcpStream),
-    #[cfg(all(unix))]
-    Uds(std::os::unix::net::UnixStream),
-}
-
-pub(crate) enum SocketListener {
-    Tcp(mio::net::TcpListener),
-    #[cfg(all(unix))]
-    Uds(mio_uds::UnixListener),
-}
-
-impl SocketListener {
-    pub(crate) fn accept(&self) -> io::Result<Option<(StdStream, SocketAddr)>> {
         match *self {
-            SocketListener::Tcp(ref lst) => lst
-                .accept_std()
-                .map(|(stream, addr)| Some((StdStream::Tcp(stream), SocketAddr::Tcp(addr)))),
-            #[cfg(all(unix))]
-            SocketListener::Uds(ref lst) => lst.accept_std().map(|res| {
-                res.map(|(stream, addr)| (StdStream::Uds(stream), SocketAddr::Uds(addr)))
-            }),
+            MioListener::Tcp(ref lst) => SocketAddr::Tcp(lst.local_addr().unwrap()),
+            #[cfg(unix)]
+            MioListener::Uds(ref lst) => SocketAddr::Uds(lst.local_addr().unwrap()),
+        }
+    }
+
+    pub(crate) fn accept(&self) -> io::Result<Option<(MioStream, SocketAddr)>> {
+        match *self {
+            MioListener::Tcp(ref lst) => lst
+                .accept()
+                .map(|(stream, addr)| Some((MioStream::Tcp(stream), SocketAddr::Tcp(addr)))),
+            #[cfg(unix)]
+            MioListener::Uds(ref lst) => lst
+                .accept()
+                .map(|(stream, addr)| Some((MioStream::Uds(stream), SocketAddr::Uds(addr)))),
         }
     }
 }

-impl mio::Evented for SocketListener {
+impl Source for MioListener {
     fn register(
-        &self,
-        poll: &mio::Poll,
-        token: mio::Token,
-        interest: mio::Ready,
-        opts: mio::PollOpt,
+        &mut self,
+        registry: &Registry,
+        token: Token,
+        interests: Interest,
     ) -> io::Result<()> {
         match *self {
-            SocketListener::Tcp(ref lst) => lst.register(poll, token, interest, opts),
-            #[cfg(all(unix))]
-            SocketListener::Uds(ref lst) => lst.register(poll, token, interest, opts),
+            MioListener::Tcp(ref mut lst) => lst.register(registry, token, interests),
+            #[cfg(unix)]
+            MioListener::Uds(ref mut lst) => lst.register(registry, token, interests),
         }
     }

     fn reregister(
-        &self,
-        poll: &mio::Poll,
-        token: mio::Token,
-        interest: mio::Ready,
-        opts: mio::PollOpt,
+        &mut self,
+        registry: &Registry,
+        token: Token,
+        interests: Interest,
     ) -> io::Result<()> {
         match *self {
-            SocketListener::Tcp(ref lst) => lst.reregister(poll, token, interest, opts),
-            #[cfg(all(unix))]
-            SocketListener::Uds(ref lst) => lst.reregister(poll, token, interest, opts),
+            MioListener::Tcp(ref mut lst) => lst.reregister(registry, token, interests),
+            #[cfg(unix)]
+            MioListener::Uds(ref mut lst) => lst.reregister(registry, token, interests),
         }
     }

-    fn deregister(&self, poll: &mio::Poll) -> io::Result<()> {
+    fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
         match *self {
-            SocketListener::Tcp(ref lst) => lst.deregister(poll),
-            #[cfg(all(unix))]
-            SocketListener::Uds(ref lst) => {
-                let res = lst.deregister(poll);
+            MioListener::Tcp(ref mut lst) => lst.deregister(registry),
+            #[cfg(unix)]
+            MioListener::Uds(ref mut lst) => {
+                let res = lst.deregister(registry);

                 // cleanup file path
                 if let Ok(addr) = lst.local_addr() {

@@ -143,28 +99,156 @@ impl mio::Evented for SocketListener {
     }
 }

-pub trait FromStream: AsyncRead + AsyncWrite + Sized {
-    fn from_stdstream(sock: StdStream) -> io::Result<Self>;
-}
-
-impl FromStream for TcpStream {
-    fn from_stdstream(sock: StdStream) -> io::Result<Self> {
-        match sock {
-            StdStream::Tcp(stream) => TcpStream::from_std(stream),
-            #[cfg(all(unix))]
-            StdStream::Uds(_) => {
-                panic!("Should not happen, bug in server impl");
-            }
-        }
-    }
-}
-
-#[cfg(all(unix))]
-impl FromStream for actix_rt::net::UnixStream {
-    fn from_stdstream(sock: StdStream) -> io::Result<Self> {
-        match sock {
-            StdStream::Tcp(_) => panic!("Should not happen, bug in server impl"),
-            StdStream::Uds(stream) => actix_rt::net::UnixStream::from_std(stream),
-        }
-    }
-}
+impl From<StdTcpListener> for MioListener {
+    fn from(lst: StdTcpListener) -> Self {
+        MioListener::Tcp(MioTcpListener::from_std(lst))
+    }
+}
+
+#[cfg(unix)]
+impl From<StdUnixListener> for MioListener {
+    fn from(lst: StdUnixListener) -> Self {
+        MioListener::Uds(MioUnixListener::from_std(lst))
+    }
+}
+
+impl fmt::Debug for MioListener {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            MioListener::Tcp(ref lst) => write!(f, "{:?}", lst),
+            #[cfg(all(unix))]
+            MioListener::Uds(ref lst) => write!(f, "{:?}", lst),
+        }
+    }
+}
+
+impl fmt::Display for MioListener {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            MioListener::Tcp(ref lst) => write!(f, "{}", lst.local_addr().ok().unwrap()),
+            #[cfg(unix)]
+            MioListener::Uds(ref lst) => write!(f, "{:?}", lst.local_addr().ok().unwrap()),
+        }
+    }
+}
+
+pub(crate) enum SocketAddr {
+    Tcp(StdSocketAddr),
+    #[cfg(unix)]
+    Uds(MioSocketAddr),
+}
+
+impl fmt::Display for SocketAddr {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            SocketAddr::Tcp(ref addr) => write!(f, "{}", addr),
+            #[cfg(unix)]
+            SocketAddr::Uds(ref addr) => write!(f, "{:?}", addr),
+        }
+    }
+}
+
+impl fmt::Debug for SocketAddr {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            SocketAddr::Tcp(ref addr) => write!(f, "{:?}", addr),
+            #[cfg(unix)]
+            SocketAddr::Uds(ref addr) => write!(f, "{:?}", addr),
+        }
+    }
+}
+
+#[derive(Debug)]
+pub enum MioStream {
+    Tcp(MioTcpStream),
+    #[cfg(unix)]
+    Uds(MioUnixStream),
+}
+
+/// helper trait for converting mio stream to tokio stream.
+pub trait FromStream: Sized {
+    fn from_mio(sock: MioStream) -> io::Result<Self>;
+}
+
+// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
+#[cfg(unix)]
+impl FromStream for TcpStream {
+    fn from_mio(sock: MioStream) -> io::Result<Self> {
+        match sock {
+            MioStream::Tcp(mio) => {
+                let raw = IntoRawFd::into_raw_fd(mio);
+                // SAFETY: This is an in-place conversion from mio stream to tokio stream.
+                TcpStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
+            }
+            MioStream::Uds(_) => {
+                panic!("Should not happen, bug in server impl");
+            }
+        }
+    }
+}
+
+// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
+#[cfg(windows)]
+impl FromStream for TcpStream {
+    fn from_mio(sock: MioStream) -> io::Result<Self> {
+        match sock {
+            MioStream::Tcp(mio) => {
+                let raw = IntoRawSocket::into_raw_socket(mio);
+                // SAFETY: This is an in-place conversion from mio stream to tokio stream.
+                TcpStream::from_std(unsafe { FromRawSocket::from_raw_socket(raw) })
+            }
+        }
+    }
+}
+
+// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
+#[cfg(unix)]
+impl FromStream for UnixStream {
+    fn from_mio(sock: MioStream) -> io::Result<Self> {
+        match sock {
+            MioStream::Tcp(_) => panic!("Should not happen, bug in server impl"),
+            MioStream::Uds(mio) => {
+                let raw = IntoRawFd::into_raw_fd(mio);
+                // SAFETY: This is an in-place conversion from mio stream to tokio stream.
+                UnixStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn socket_addr() {
+        let addr = SocketAddr::Tcp("127.0.0.1:8080".parse().unwrap());
+        assert!(format!("{:?}", addr).contains("127.0.0.1:8080"));
+        assert_eq!(format!("{}", addr), "127.0.0.1:8080");
+
+        let addr: StdSocketAddr = "127.0.0.1:0".parse().unwrap();
+        let socket = MioTcpSocket::new_v4().unwrap();
+        socket.set_reuseaddr(true).unwrap();
+        socket.bind(addr).unwrap();
+        let tcp = socket.listen(128).unwrap();
+        let lst = MioListener::Tcp(tcp);
+        assert!(format!("{:?}", lst).contains("TcpListener"));
+        assert!(format!("{}", lst).contains("127.0.0.1"));
+    }
+
+    #[test]
+    #[cfg(unix)]
+    fn uds() {
+        let _ = std::fs::remove_file("/tmp/sock.xxxxx");
+
+        if let Ok(socket) = MioUnixListener::bind("/tmp/sock.xxxxx") {
+            let addr = socket.local_addr().expect("Couldn't get local address");
+            let a = SocketAddr::Uds(addr);
+            assert!(format!("{:?}", a).contains("/tmp/sock.xxxxx"));
+            assert!(format!("{}", a).contains("/tmp/sock.xxxxx"));
+
+            let lst = MioListener::Uds(socket);
+            assert!(format!("{:?}", lst).contains("/tmp/sock.xxxxx"));
+            assert!(format!("{}", lst).contains("/tmp/sock.xxxxx"));
+        }
+    }
+}
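All three `FromStream` impls above share one raw-handle round-trip. Shown in isolation (a sketch, not part of the commit; `mio_to_tokio_tcp` is a hypothetical name), the unix variant boils down to:

use std::os::unix::io::{FromRawFd, IntoRawFd};

// mio already put the socket into non-blocking mode, so the raw fd can be
// re-wrapped as a std stream and handed to tokio unchanged. This must run
// inside a tokio runtime context, since `from_std` registers the fd with
// the reactor.
fn mio_to_tokio_tcp(mio: mio::net::TcpStream) -> std::io::Result<tokio::net::TcpStream> {
    let raw = mio.into_raw_fd();
    // SAFETY: `raw` is a valid, owned fd just released by `into_raw_fd`.
    let std_stream = unsafe { std::net::TcpStream::from_raw_fd(raw) };
    tokio::net::TcpStream::from_std(std_stream)
}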

View File

@@ -0,0 +1,89 @@
use std::{
    collections::VecDeque,
    ops::Deref,
    sync::{Arc, Mutex, MutexGuard},
};

use mio::{Registry, Token as MioToken, Waker};

use crate::worker::WorkerHandle;

/// waker token for `mio::Poll` instance
pub(crate) const WAKER_TOKEN: MioToken = MioToken(usize::MAX);

/// A `mio::Waker` paired with a queue of `WakerInterest`s, used to wake up `Accept`'s `Poll`
/// and tell it what to look into.
pub(crate) struct WakerQueue(Arc<(Waker, Mutex<VecDeque<WakerInterest>>)>);

impl Clone for WakerQueue {
    fn clone(&self) -> Self {
        Self(self.0.clone())
    }
}

impl Deref for WakerQueue {
    type Target = (Waker, Mutex<VecDeque<WakerInterest>>);

    fn deref(&self) -> &Self::Target {
        self.0.deref()
    }
}

impl WakerQueue {
    /// construct a waker queue from the given `Poll`'s `Registry`.
    ///
    /// A fixed `WAKER_TOKEN` is used to identify wake events, and the `Poll` must match an
    /// event's token against it to properly handle the queued `WakerInterest`s.
    pub(crate) fn new(registry: &Registry) -> std::io::Result<Self> {
        let waker = Waker::new(registry, WAKER_TOKEN)?;
        let queue = Mutex::new(VecDeque::with_capacity(16));

        Ok(Self(Arc::new((waker, queue))))
    }

    /// push a new interest to the queue and wake up the accept poll afterwards.
    pub(crate) fn wake(&self, interest: WakerInterest) {
        let (waker, queue) = self.deref();

        queue
            .lock()
            .expect("Failed to lock WakerQueue")
            .push_back(interest);

        waker
            .wake()
            .unwrap_or_else(|e| panic!("can not wake up Accept Poll: {}", e));
    }

    /// get a MutexGuard of the waker queue.
    pub(crate) fn guard(&self) -> MutexGuard<'_, VecDeque<WakerInterest>> {
        self.deref().1.lock().expect("Failed to lock WakerQueue")
    }

    /// reset the waker queue so it does not grow unboundedly.
    pub(crate) fn reset(queue: &mut VecDeque<WakerInterest>) {
        std::mem::swap(&mut VecDeque::<WakerInterest>::with_capacity(16), queue);
    }
}

/// types of interests we would look into when `Accept`'s `Poll` is woken up by the waker.
///
/// Note: these interests should not be confused with `mio::Interest`; they are mostly not
/// I/O related.
pub(crate) enum WakerInterest {
    /// `WorkerAvailable` is an interest from a `Worker`, notifying `Accept` that a worker is
    /// available and can accept new tasks.
    WorkerAvailable,
    /// `Pause`, `Resume`, and `Stop` interests come from the `ServerBuilder` future. It
    /// listens for `ServerCommand`s and notifies `Accept` to perform exactly these tasks.
    Pause,
    Resume,
    Stop,
    /// `Timer` is an interest sent as a delayed future. When an error happens while accepting
    /// a connection, `Accept` deregisters the socket listener temporarily, then wakes up the
    /// poll and registers it again once the delayed future resolves.
    Timer,
    /// `Worker` is an interest that arises after a worker runs into a faulted state (determined
    /// by whether work can be sent to it successfully). `Accept` is woken up to adopt the new
    /// `WorkerHandle`.
    Worker(WorkerHandle),
}
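The consuming side of the queue lives in `Accept`'s poll loop, which this excerpt does not include. A simplified sketch of the intended protocol (illustrative only; `drain_waker_queue` and the arm bodies are assumptions, not the real dispatch):

fn drain_waker_queue(queue: &WakerQueue) {
    // an event carrying WAKER_TOKEN means "something was pushed"; drain it all
    let mut guard = queue.guard();
    while let Some(interest) = guard.pop_front() {
        match interest {
            WakerInterest::WorkerAvailable => { /* retry sockets paused by back-pressure */ }
            WakerInterest::Pause => { /* deregister all listeners */ }
            WakerInterest::Resume => { /* re-register all listeners */ }
            WakerInterest::Stop => { /* exit the accept loop */ }
            WakerInterest::Timer => { /* re-register listeners after the error backoff */ }
            WakerInterest::Worker(_handle) => { /* adopt the restarted worker's handle */ }
        }
    }
    // swap in a fresh queue so the backing buffer does not keep its peak capacity
    WakerQueue::reset(&mut guard);
}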

View File

@@ -1,22 +1,22 @@
+use std::future::Future;
 use std::pin::Pin;
 use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
 use std::sync::Arc;
 use std::task::{Context, Poll};
-use std::time;
+use std::time::Duration;

-use actix_rt::time::{delay_until, Delay, Instant};
+use actix_rt::time::{sleep_until, Instant, Sleep};
 use actix_rt::{spawn, Arbiter};
 use actix_utils::counter::Counter;
-use futures_channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender};
-use futures_channel::oneshot;
-use futures_util::future::{join_all, LocalBoxFuture, MapOk};
-use futures_util::{future::Future, stream::Stream, FutureExt, TryFutureExt};
+use futures_core::future::LocalBoxFuture;
 use log::{error, info, trace};
+use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
+use tokio::sync::oneshot;

-use crate::accept::AcceptNotify;
 use crate::service::{BoxedServerService, InternalServiceFactory};
-use crate::socket::{SocketAddr, StdStream};
-use crate::Token;
+use crate::socket::{MioStream, SocketAddr};
+use crate::waker_queue::{WakerInterest, WakerQueue};
+use crate::{join_all, Token};

 pub(crate) struct WorkerCommand(Conn);
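Note that `join_all` is now imported from the crate root instead of from futures_util; that helper is not shown in this excerpt. A minimal local equivalent, assuming the same semantics as `futures_util::future::join_all`, might look like:

use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

enum JoinFuture<T> {
    Future(Pin<Box<dyn Future<Output = T>>>),
    Result(Option<T>),
}

pub(crate) struct JoinAll<T> {
    fut: Vec<JoinFuture<T>>,
}

impl<T> Unpin for JoinAll<T> {}

// drive every future concurrently; once all are done, yield the outputs in order
pub(crate) fn join_all<T>(fut: Vec<impl Future<Output = T> + 'static>) -> JoinAll<T> {
    let fut = fut
        .into_iter()
        .map(|f| JoinFuture::Future(Box::pin(f)))
        .collect();
    JoinAll { fut }
}

impl<T> Future for JoinAll<T> {
    type Output = Vec<T>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let mut ready = true;

        let this = self.get_mut();
        for fut in this.fut.iter_mut() {
            if let JoinFuture::Future(f) = fut {
                match f.as_mut().poll(cx) {
                    // park each finished output in place, keep polling the rest
                    Poll::Ready(t) => *fut = JoinFuture::Result(Some(t)),
                    Poll::Pending => ready = false,
                }
            }
        }

        if ready {
            let res = this
                .fut
                .iter_mut()
                .map(|fut| match fut {
                    JoinFuture::Result(t) => t.take().unwrap(),
                    _ => unreachable!(),
                })
                .collect();
            Poll::Ready(res)
        } else {
            Poll::Pending
        }
    }
}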
@@ -29,7 +29,7 @@ pub(crate) struct StopCommand {
 #[derive(Debug)]
 pub(crate) struct Conn {
-    pub io: StdStream,
+    pub io: MioStream,
     pub token: Token,
     pub peer: Option<SocketAddr>,
 }
@@ -46,31 +46,33 @@ pub fn max_concurrent_connections(num: usize) {
     MAX_CONNS.store(num, Ordering::Relaxed);
 }

-pub(crate) fn num_connections() -> usize {
-    MAX_CONNS_COUNTER.with(|conns| conns.total())
-}
-
 thread_local! {
     static MAX_CONNS_COUNTER: Counter =
         Counter::new(MAX_CONNS.load(Ordering::Relaxed));
 }

+pub(crate) fn num_connections() -> usize {
+    MAX_CONNS_COUNTER.with(|conns| conns.total())
+}
+
+// a handle to the worker that can send messages to it and share the worker's
+// availability with other threads.
 #[derive(Clone)]
-pub(crate) struct WorkerClient {
+pub(crate) struct WorkerHandle {
     pub idx: usize,
     tx1: UnboundedSender<WorkerCommand>,
     tx2: UnboundedSender<StopCommand>,
     avail: WorkerAvailability,
 }

-impl WorkerClient {
+impl WorkerHandle {
     pub fn new(
         idx: usize,
         tx1: UnboundedSender<WorkerCommand>,
         tx2: UnboundedSender<StopCommand>,
         avail: WorkerAvailability,
     ) -> Self {
-        WorkerClient {
+        WorkerHandle {
             idx,
             tx1,
             tx2,
@@ -79,9 +81,7 @@ impl WorkerClient {
     }

     pub fn send(&self, msg: Conn) -> Result<(), Conn> {
-        self.tx1
-            .unbounded_send(WorkerCommand(msg))
-            .map_err(|msg| msg.into_inner().0)
+        self.tx1.send(WorkerCommand(msg)).map_err(|msg| msg.0 .0)
     }

     pub fn available(&self) -> bool {
@@ -90,21 +90,21 @@ impl WorkerClient {
     pub fn stop(&self, graceful: bool) -> oneshot::Receiver<bool> {
         let (result, rx) = oneshot::channel();
-        let _ = self.tx2.unbounded_send(StopCommand { graceful, result });
+        let _ = self.tx2.send(StopCommand { graceful, result });
         rx
     }
 }

 #[derive(Clone)]
 pub(crate) struct WorkerAvailability {
-    notify: AcceptNotify,
+    waker: WakerQueue,
     available: Arc<AtomicBool>,
 }

 impl WorkerAvailability {
-    pub fn new(notify: AcceptNotify) -> Self {
+    pub fn new(waker: WakerQueue) -> Self {
         WorkerAvailability {
-            notify,
+            waker,
             available: Arc::new(AtomicBool::new(false)),
         }
     }
@@ -115,8 +115,9 @@ impl WorkerAvailability {
     pub fn set(&self, val: bool) {
         let old = self.available.swap(val, Ordering::Release);
+        // notify the accept on switch to available.
         if !old && val {
-            self.notify.notify()
+            self.waker.wake(WakerInterest::WorkerAvailable);
         }
     }
 }
@@ -133,7 +134,7 @@ pub(crate) struct Worker {
     conns: Counter,
     factories: Vec<Box<dyn InternalServiceFactory>>,
     state: WorkerState,
-    shutdown_timeout: time::Duration,
+    shutdown_timeout: Duration,
 }

 struct WorkerService {
@@ -164,63 +165,65 @@ impl Worker {
         idx: usize,
         factories: Vec<Box<dyn InternalServiceFactory>>,
         availability: WorkerAvailability,
-        shutdown_timeout: time::Duration,
-    ) -> WorkerClient {
-        let (tx1, rx) = unbounded();
-        let (tx2, rx2) = unbounded();
+        shutdown_timeout: Duration,
+    ) -> WorkerHandle {
+        let (tx1, rx) = unbounded_channel();
+        let (tx2, rx2) = unbounded_channel();
         let avail = availability.clone();

-        Arbiter::new().send(
-            async move {
-                availability.set(false);
-                let mut wrk = MAX_CONNS_COUNTER.with(move |conns| Worker {
-                    rx,
-                    rx2,
-                    availability,
-                    factories,
-                    shutdown_timeout,
-                    services: Vec::new(),
-                    conns: conns.clone(),
-                    state: WorkerState::Unavailable(Vec::new()),
-                });
-
-                let mut fut: Vec<MapOk<LocalBoxFuture<'static, _>, _>> = Vec::new();
-                for (idx, factory) in wrk.factories.iter().enumerate() {
-                    fut.push(factory.create().map_ok(move |r| {
-                        r.into_iter()
-                            .map(|(t, s): (Token, _)| (idx, t, s))
-                            .collect::<Vec<_>>()
-                    }));
-                }
-
-                spawn(async move {
-                    let res = join_all(fut).await;
-                    let res: Result<Vec<_>, _> = res.into_iter().collect();
-                    match res {
-                        Ok(services) => {
-                            for item in services {
-                                for (factory, token, service) in item {
-                                    assert_eq!(token.0, wrk.services.len());
-                                    wrk.services.push(WorkerService {
-                                        factory,
-                                        service,
-                                        status: WorkerServiceStatus::Unavailable,
-                                    });
-                                }
-                            }
-                        }
-                        Err(e) => {
-                            error!("Can not start worker: {:?}", e);
-                            Arbiter::current().stop();
-                        }
-                    }
-                    wrk.await
-                });
-            }
-            .boxed(),
-        );
-
-        WorkerClient::new(idx, tx1, tx2, avail)
+        // every worker runs in its own arbiter.
+        Arbiter::new().send(Box::pin(async move {
+            availability.set(false);
+            let mut wrk = MAX_CONNS_COUNTER.with(move |conns| Worker {
+                rx,
+                rx2,
+                availability,
+                factories,
+                shutdown_timeout,
+                services: Vec::new(),
+                conns: conns.clone(),
+                state: WorkerState::Unavailable,
+            });
+
+            let fut = wrk
+                .factories
+                .iter()
+                .enumerate()
+                .map(|(idx, factory)| {
+                    let fut = factory.create();
+                    async move {
+                        fut.await.map(|r| {
+                            r.into_iter().map(|(t, s)| (idx, t, s)).collect::<Vec<_>>()
+                        })
+                    }
+                })
+                .collect::<Vec<_>>();
+
+            spawn(async move {
+                let res: Result<Vec<_>, _> = join_all(fut).await.into_iter().collect();
+                match res {
+                    Ok(services) => {
+                        for item in services {
+                            for (factory, token, service) in item {
+                                assert_eq!(token.0, wrk.services.len());
+                                wrk.services.push(WorkerService {
+                                    factory,
+                                    service,
+                                    status: WorkerServiceStatus::Unavailable,
+                                });
+                            }
+                        }
+                    }
+                    Err(e) => {
+                        error!("Can not start worker: {:?}", e);
+                        Arbiter::current().stop();
+                    }
+                }
+                wrk.await
+            });
+        }));
+
+        WorkerHandle::new(idx, tx1, tx2, avail)
     }

     fn shutdown(&mut self, force: bool) {
@@ -242,7 +245,7 @@ impl Worker {
     fn check_readiness(&mut self, cx: &mut Context<'_>) -> Result<bool, (Token, usize)> {
         let mut ready = self.conns.available(cx);
         let mut failed = None;
-        for (idx, srv) in &mut self.services.iter_mut().enumerate() {
+        for (idx, srv) in self.services.iter_mut().enumerate() {
             if srv.status == WorkerServiceStatus::Available
                 || srv.status == WorkerServiceStatus::Unavailable
             {
@@ -288,16 +291,15 @@ impl Worker {
 enum WorkerState {
     Available,
-    Unavailable(Vec<Conn>),
+    Unavailable,
     Restarting(
         usize,
         Token,
-        #[allow(clippy::type_complexity)]
-        Pin<Box<dyn Future<Output = Result<Vec<(Token, BoxedServerService)>, ()>>>>,
+        LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>>,
     ),
     Shutdown(
-        Pin<Box<Delay>>,
-        Pin<Box<Delay>>,
+        Pin<Box<Sleep>>,
+        Pin<Box<Sleep>>,
         Option<oneshot::Sender<bool>>,
     ),
 }
@@ -305,12 +307,10 @@ enum WorkerState {
 impl Future for Worker {
     type Output = ();

-    // FIXME: remove this attribute
-    #[allow(clippy::never_loop)]
     fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
         // `StopWorker` message handler
         if let Poll::Ready(Some(StopCommand { graceful, result })) =
-            Pin::new(&mut self.rx2).poll_next(cx)
+            Pin::new(&mut self.rx2).poll_recv(cx)
         {
             self.availability.set(false);
             let num = num_connections();
@@ -324,8 +324,8 @@ impl Future for Worker {
             if num != 0 {
                 info!("Graceful worker shutdown, {} connections", num);
                 self.state = WorkerState::Shutdown(
-                    Box::pin(delay_until(Instant::now() + time::Duration::from_secs(1))),
-                    Box::pin(delay_until(Instant::now() + self.shutdown_timeout)),
+                    Box::pin(sleep_until(Instant::now() + Duration::from_secs(1))),
+                    Box::pin(sleep_until(Instant::now() + self.shutdown_timeout)),
                     Some(result),
                 );
             } else {
@@ -341,53 +341,35 @@ impl Future for Worker {
         }

         match self.state {
-            WorkerState::Unavailable(ref mut conns) => {
-                let conn = conns.pop();
-                match self.check_readiness(cx) {
-                    Ok(true) => {
-                        // process requests from wait queue
-                        if let Some(conn) = conn {
-                            let guard = self.conns.get();
-                            let _ = self.services[conn.token.0]
-                                .service
-                                .call((Some(guard), conn.io));
-                        } else {
-                            self.state = WorkerState::Available;
-                            self.availability.set(true);
-                        }
-                        self.poll(cx)
-                    }
-                    Ok(false) => {
-                        // push connection back to queue
-                        if let Some(conn) = conn {
-                            if let WorkerState::Unavailable(ref mut conns) = self.state {
-                                conns.push(conn);
-                            }
-                        }
-                        Poll::Pending
-                    }
-                    Err((token, idx)) => {
-                        trace!(
-                            "Service {:?} failed, restarting",
-                            self.factories[idx].name(token)
-                        );
-                        self.services[token.0].status = WorkerServiceStatus::Restarting;
-                        self.state =
-                            WorkerState::Restarting(idx, token, self.factories[idx].create());
-                        self.poll(cx)
-                    }
+            WorkerState::Unavailable => match self.check_readiness(cx) {
+                Ok(true) => {
+                    self.state = WorkerState::Available;
+                    self.availability.set(true);
+                    self.poll(cx)
                 }
-            }
+                Ok(false) => Poll::Pending,
+                Err((token, idx)) => {
+                    trace!(
+                        "Service {:?} failed, restarting",
+                        self.factories[idx].name(token)
+                    );
+                    self.services[token.0].status = WorkerServiceStatus::Restarting;
+                    self.state =
+                        WorkerState::Restarting(idx, token, self.factories[idx].create());
+                    self.poll(cx)
+                }
+            },
             WorkerState::Restarting(idx, token, ref mut fut) => {
-                match Pin::new(fut).poll(cx) {
+                match fut.as_mut().poll(cx) {
                     Poll::Ready(Ok(item)) => {
-                        for (token, service) in item {
+                        // only interest in the first item?
+                        if let Some((token, service)) = item.into_iter().next() {
                             trace!(
                                 "Service {:?} has been restarted",
                                 self.factories[idx].name(token)
                             );
                             self.services[token.0].created(service);
-                            self.state = WorkerState::Unavailable(Vec::new());
+                            self.state = WorkerState::Unavailable;
                             return self.poll(cx);
                         }
                     }
@@ -397,9 +379,7 @@ impl Future for Worker {
                             self.factories[idx].name(token)
                         );
                     }
-                    Poll::Pending => {
-                        return Poll::Pending;
-                    }
+                    Poll::Pending => return Poll::Pending,
                 }
                 self.poll(cx)
             }
@@ -412,71 +392,56 @@ impl Future for Worker {
                 }

                 // check graceful timeout
-                match t2.as_mut().poll(cx) {
-                    Poll::Pending => (),
-                    Poll::Ready(_) => {
-                        let _ = tx.take().unwrap().send(false);
-                        self.shutdown(true);
-                        Arbiter::current().stop();
-                        return Poll::Ready(());
-                    }
+                if Pin::new(t2).poll(cx).is_ready() {
+                    let _ = tx.take().unwrap().send(false);
+                    self.shutdown(true);
+                    Arbiter::current().stop();
+                    return Poll::Ready(());
                 }

                 // sleep for 1 second and then check again
-                match t1.as_mut().poll(cx) {
-                    Poll::Pending => (),
-                    Poll::Ready(_) => {
-                        *t1 = Box::pin(delay_until(
-                            Instant::now() + time::Duration::from_secs(1),
-                        ));
-                        let _ = t1.as_mut().poll(cx);
-                    }
+                if t1.as_mut().poll(cx).is_ready() {
+                    *t1 = Box::pin(sleep_until(Instant::now() + Duration::from_secs(1)));
+                    let _ = t1.as_mut().poll(cx);
                 }

                 Poll::Pending
             }
-            WorkerState::Available => {
-                loop {
-                    match Pin::new(&mut self.rx).poll_next(cx) {
-                        // handle incoming io stream
-                        Poll::Ready(Some(WorkerCommand(msg))) => {
-                            match self.check_readiness(cx) {
-                                Ok(true) => {
-                                    let guard = self.conns.get();
-                                    let _ = self.services[msg.token.0]
-                                        .service
-                                        .call((Some(guard), msg.io));
-                                    continue;
-                                }
-                                Ok(false) => {
-                                    trace!("Worker is unavailable");
-                                    self.availability.set(false);
-                                    self.state = WorkerState::Unavailable(vec![msg]);
-                                }
-                                Err((token, idx)) => {
-                                    trace!(
-                                        "Service {:?} failed, restarting",
-                                        self.factories[idx].name(token)
-                                    );
-                                    self.availability.set(false);
-                                    self.services[token.0].status =
-                                        WorkerServiceStatus::Restarting;
-                                    self.state = WorkerState::Restarting(
-                                        idx,
-                                        token,
-                                        self.factories[idx].create(),
-                                    );
-                                }
-                            }
-                            return self.poll(cx);
-                        }
-                        Poll::Pending => {
-                            self.state = WorkerState::Available;
-                            return Poll::Pending;
-                        }
-                        Poll::Ready(None) => return Poll::Ready(()),
-                    }
-                }
-            }
+            // actively poll stream and handle worker command
+            WorkerState::Available => loop {
+                match self.check_readiness(cx) {
+                    Ok(true) => (),
+                    Ok(false) => {
+                        trace!("Worker is unavailable");
+                        self.availability.set(false);
+                        self.state = WorkerState::Unavailable;
+                        return self.poll(cx);
+                    }
+                    Err((token, idx)) => {
+                        trace!(
+                            "Service {:?} failed, restarting",
+                            self.factories[idx].name(token)
+                        );
+                        self.availability.set(false);
+                        self.services[token.0].status = WorkerServiceStatus::Restarting;
+                        self.state =
+                            WorkerState::Restarting(idx, token, self.factories[idx].create());
+                        return self.poll(cx);
+                    }
+                }
+
+                match Pin::new(&mut self.rx).poll_recv(cx) {
+                    // handle incoming io stream
+                    Poll::Ready(Some(WorkerCommand(msg))) => {
+                        let guard = self.conns.get();
+                        let _ = self.services[msg.token.0]
+                            .service
+                            .call((Some(guard), msg.io));
+                    }
+                    Poll::Pending => return Poll::Pending,
+                    Poll::Ready(None) => return Poll::Ready(()),
+                };
+            },
         }
     }
 }
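One detail in the `Shutdown` arm above: when the one-second tick fires, the boxed `Sleep` is replaced and then polled once more so that the new deadline registers a waker with the task. The pattern in isolation (a sketch, not part of the commit; `tick_every_second` is a hypothetical name):

use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;

use actix_rt::time::{sleep_until, Instant, Sleep};

// returns true when the tick fired; re-arms and pre-polls the timer so the
// task will be woken again at the next deadline
fn tick_every_second(timer: &mut Pin<Box<Sleep>>, cx: &mut Context<'_>) -> bool {
    if timer.as_mut().poll(cx).is_ready() {
        *timer = Box::pin(sleep_until(Instant::now() + Duration::from_secs(1)));
        // poll once so the fresh Sleep registers `cx`'s waker
        let _ = timer.as_mut().poll(cx);
        true
    } else {
        false
    }
}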

View File

@@ -5,14 +5,13 @@ use std::{net, thread, time};
 use actix_server::Server;
 use actix_service::fn_service;
 use futures_util::future::{lazy, ok};
-use socket2::{Domain, Protocol, Socket, Type};

 fn unused_addr() -> net::SocketAddr {
     let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
-    let socket = Socket::new(Domain::ipv4(), Type::stream(), Some(Protocol::tcp())).unwrap();
-    socket.bind(&addr.into()).unwrap();
-    socket.set_reuse_address(true).unwrap();
-    let tcp = socket.into_tcp_listener();
+    let socket = mio::net::TcpSocket::new_v4().unwrap();
+    socket.bind(addr).unwrap();
+    socket.set_reuseaddr(true).unwrap();
+    let tcp = socket.listen(32).unwrap();
     tcp.local_addr().unwrap()
 }
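The helper above reserves an address through mio's `TcpSocket`. For comparison (a sketch, not part of the commit), the same trick of asking the OS for an ephemeral port works with std alone:

fn unused_addr_std() -> std::net::SocketAddr {
    // binding to port 0 makes the OS assign an ephemeral port; dropping the
    // listener frees it again, so the test can bind the same address shortly after
    std::net::TcpListener::bind("127.0.0.1:0")
        .unwrap()
        .local_addr()
        .unwrap()
}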
@@ -22,8 +21,7 @@ fn test_bind() {
     let (tx, rx) = mpsc::channel();

     let h = thread::spawn(move || {
-        let mut sys = actix_rt::System::new("test");
+        let sys = actix_rt::System::new("test");
         let srv = sys.block_on(lazy(|_| {
             Server::build()
                 .workers(1)
@@ -49,17 +47,17 @@ fn test_listen() {
     let (tx, rx) = mpsc::channel();

     let h = thread::spawn(move || {
-        let mut sys = actix_rt::System::new("test");
+        let sys = actix_rt::System::new("test");
         let lst = net::TcpListener::bind(addr).unwrap();
-        sys.block_on(lazy(|_| {
+        sys.block_on(async {
             Server::build()
                 .disable_signals()
                 .workers(1)
                 .listen("test", lst, move || fn_service(|_| ok::<_, ()>(())))
                 .unwrap()
-                .start()
-        }));
-        let _ = tx.send(actix_rt::System::current());
+                .start();
+            let _ = tx.send(actix_rt::System::current());
+        });
         let _ = sys.run();
     });
     let sys = rx.recv().unwrap();
@@ -83,7 +81,7 @@ fn test_start() {
     let (tx, rx) = mpsc::channel();

     let h = thread::spawn(move || {
-        let mut sys = actix_rt::System::new("test");
+        let sys = actix_rt::System::new("test");
         let srv = sys.block_on(lazy(|_| {
             Server::build()
                 .backlog(100)
@@ -102,6 +100,7 @@ fn test_start() {
         let _ = tx.send((srv, actix_rt::System::current()));
         let _ = sys.run();
     });
+
     let (srv, sys) = rx.recv().unwrap();

     let mut buf = [1u8; 4];
@@ -151,7 +150,7 @@ fn test_configure() {
     let h = thread::spawn(move || {
         let num = num2.clone();
-        let mut sys = actix_rt::System::new("test");
+        let sys = actix_rt::System::new("test");
         let srv = sys.block_on(lazy(|_| {
             Server::build()
                 .disable_signals()

View File

@@ -18,10 +18,10 @@ name = "actix_testing"
 path = "src/lib.rs"

 [dependencies]
-actix-rt = "1.0.0"
+actix-rt = "2.0.0-beta.1"
 actix-macros = "0.1.0"
 actix-server = "1.0.0"
-actix-service = "1.0.0"
+actix-service = "2.0.0-beta.1"

 log = "0.4"
 socket2 = "0.3"

View File

@@ -83,7 +83,7 @@ impl TestServer {
         // run server in separate thread
         thread::spawn(move || {
-            let mut sys = System::new("actix-test-server");
+            let sys = System::new("actix-test-server");
             let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap();
             let local_addr = tcp.local_addr().unwrap();
@@ -94,9 +94,8 @@ impl TestServer {
                     .workers(1)
                     .disable_signals()
                     .start();
+                tx.send((System::current(), local_addr)).unwrap();
             });
-
-            tx.send((System::current(), local_addr)).unwrap();

             sys.run()
         });