
commit 8ad93f4838
parent ffb07c8884
Author: Nikolay Kim
Date:   2018-12-09 22:14:29 -08:00

    move server to separate crate

21 changed files with 308 additions and 149 deletions

actix-server/CHANGES.md (new file)
@@ -0,0 +1,5 @@
# Changes
## [0.1.0] - 2018-12-09
* Move server to separate crate

actix-server/Cargo.toml (new file)
@@ -0,0 +1,74 @@
[package]
name = "actix-server"
version = "0.1.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix server - General purpose tcp server"
readme = "README.md"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-server/"
categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0"
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
edition = "2018"
workspace = "../"
[package.metadata.docs.rs]
features = ["ssl", "tls", "rust-tls"]
[lib]
name = "actix_server"
path = "src/lib.rs"
[features]
default = []
# tls
tls = ["native-tls"]
# openssl
ssl = ["openssl", "tokio-openssl"]
# rustls
rust-tls = ["rustls", "tokio-rustls", "webpki", "webpki-roots"]
[dependencies]
actix-service = "0.1.1"
actix-rt = { path = "../actix-rt" }
log = "0.4"
num_cpus = "1.0"
# io
mio = "^0.6.13"
net2 = "0.2"
bytes = "0.4"
futures = "0.1"
slab = "0.4"
tokio-io = "0.1"
tokio-tcp = "0.1"
tokio-timer = "0.2"
tokio-reactor = "0.1"
tokio-signal = "0.2"
# native-tls
native-tls = { version="0.2", optional = true }
# openssl
openssl = { version="0.10", optional = true }
tokio-openssl = { version="0.3", optional = true }
#rustls
rustls = { version = "^0.14", optional = true }
tokio-rustls = { version = "^0.8", optional = true }
webpki = { version = "0.18", optional = true }
webpki-roots = { version = "0.15", optional = true }
[dev-dependencies]
env_logger = "0.5"
[profile.release]
lto = true
opt-level = 3
codegen-units = 1

actix-server/src/accept.rs (new file)
@@ -0,0 +1,461 @@
use std::sync::mpsc as sync_mpsc;
use std::time::{Duration, Instant};
use std::{io, net, thread};
use actix_rt::System;
use futures::future::{lazy, Future};
use log::{error, info};
use mio;
use slab::Slab;
use tokio_timer::Delay;
use super::server::Server;
use super::worker::{Conn, WorkerClient};
use super::Token;
pub(crate) enum Command {
Pause,
Resume,
Stop,
Worker(WorkerClient),
}
struct ServerSocketInfo {
addr: net::SocketAddr,
token: Token,
sock: mio::net::TcpListener,
timeout: Option<Instant>,
}
#[derive(Clone)]
pub(crate) struct AcceptNotify(mio::SetReadiness);
impl AcceptNotify {
pub(crate) fn new(ready: mio::SetReadiness) -> Self {
AcceptNotify(ready)
}
pub(crate) fn notify(&self) {
let _ = self.0.set_readiness(mio::Ready::readable());
}
}
impl Default for AcceptNotify {
fn default() -> Self {
AcceptNotify::new(mio::Registration::new2().1)
}
}
pub(crate) struct AcceptLoop {
cmd_reg: Option<mio::Registration>,
cmd_ready: mio::SetReadiness,
notify_reg: Option<mio::Registration>,
notify_ready: mio::SetReadiness,
tx: sync_mpsc::Sender<Command>,
rx: Option<sync_mpsc::Receiver<Command>>,
srv: Option<Server>,
}
impl AcceptLoop {
pub fn new(srv: Server) -> AcceptLoop {
let (tx, rx) = sync_mpsc::channel();
let (cmd_reg, cmd_ready) = mio::Registration::new2();
let (notify_reg, notify_ready) = mio::Registration::new2();
AcceptLoop {
tx,
cmd_ready,
cmd_reg: Some(cmd_reg),
notify_ready,
notify_reg: Some(notify_reg),
rx: Some(rx),
srv: Some(srv),
}
}
pub fn send(&self, msg: Command) {
let _ = self.tx.send(msg);
let _ = self.cmd_ready.set_readiness(mio::Ready::readable());
}
pub fn get_notify(&self) -> AcceptNotify {
AcceptNotify::new(self.notify_ready.clone())
}
pub(crate) fn start(
&mut self,
socks: Vec<(Token, net::TcpListener)>,
workers: Vec<WorkerClient>,
) {
let srv = self.srv.take().expect("Can not re-use AcceptInfo");
Accept::start(
self.rx.take().expect("Can not re-use AcceptInfo"),
self.cmd_reg.take().expect("Can not re-use AcceptInfo"),
self.notify_reg.take().expect("Can not re-use AcceptInfo"),
socks,
srv,
workers,
);
}
}
struct Accept {
poll: mio::Poll,
rx: sync_mpsc::Receiver<Command>,
sockets: Slab<ServerSocketInfo>,
workers: Vec<WorkerClient>,
srv: Server,
timer: (mio::Registration, mio::SetReadiness),
next: usize,
backpressure: bool,
}
const DELTA: usize = 100;
const CMD: mio::Token = mio::Token(0);
const TIMER: mio::Token = mio::Token(1);
const NOTIFY: mio::Token = mio::Token(2);
/// This function defines errors that are per-connection: if we get this
/// error from the `accept()` system call, the next connection might still
/// be ready to be accepted.
///
/// All other errors incur a timeout before the next `accept()` is
/// performed. The timeout is useful for handling resource exhaustion
/// errors like ENFILE and EMFILE; without it we could enter a tight loop.
fn connection_error(e: &io::Error) -> bool {
e.kind() == io::ErrorKind::ConnectionRefused
|| e.kind() == io::ErrorKind::ConnectionAborted
|| e.kind() == io::ErrorKind::ConnectionReset
}
impl Accept {
#![allow(clippy::too_many_arguments)]
pub(crate) fn start(
rx: sync_mpsc::Receiver<Command>,
cmd_reg: mio::Registration,
notify_reg: mio::Registration,
socks: Vec<(Token, net::TcpListener)>,
srv: Server,
workers: Vec<WorkerClient>,
) {
let sys = System::current();
// start accept thread
let _ = thread::Builder::new()
.name("actix-web accept loop".to_owned())
.spawn(move || {
System::set_current(sys);
let mut accept = Accept::new(rx, socks, workers, srv);
// Start listening for incoming commands
if let Err(err) = accept.poll.register(
&cmd_reg,
CMD,
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register Registration: {}", err);
}
// Start listening for notify updates
if let Err(err) = accept.poll.register(
&notify_reg,
NOTIFY,
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register Registration: {}", err);
}
accept.poll();
});
}
fn new(
rx: sync_mpsc::Receiver<Command>,
socks: Vec<(Token, net::TcpListener)>,
workers: Vec<WorkerClient>,
srv: Server,
) -> Accept {
// Create a poll instance
let poll = match mio::Poll::new() {
Ok(poll) => poll,
Err(err) => panic!("Can not create mio::Poll: {}", err),
};
// Start accept
let mut sockets = Slab::new();
for (hnd_token, lst) in socks.into_iter() {
let addr = lst.local_addr().unwrap();
let server = mio::net::TcpListener::from_std(lst)
.expect("Can not create mio::net::TcpListener");
let entry = sockets.vacant_entry();
let token = entry.key();
// Start listening for incoming connections
if let Err(err) = poll.register(
&server,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register io: {}", err);
}
entry.insert(ServerSocketInfo {
addr,
token: hnd_token,
sock: server,
timeout: None,
});
}
// Timer
let (tm, tmr) = mio::Registration::new2();
if let Err(err) =
poll.register(&tm, TIMER, mio::Ready::readable(), mio::PollOpt::edge())
{
panic!("Can not register Registration: {}", err);
}
Accept {
poll,
rx,
sockets,
workers,
srv,
next: 0,
timer: (tm, tmr),
backpressure: false,
}
}
fn poll(&mut self) {
// Create storage for events
let mut events = mio::Events::with_capacity(128);
loop {
if let Err(err) = self.poll.poll(&mut events, None) {
panic!("Poll error: {}", err);
}
for event in events.iter() {
let token = event.token();
match token {
CMD => {
if !self.process_cmd() {
return;
}
}
TIMER => self.process_timer(),
NOTIFY => self.backpressure(false),
_ => {
let token = usize::from(token);
if token < DELTA {
continue;
}
self.accept(token - DELTA);
}
}
}
}
}
fn process_timer(&mut self) {
let now = Instant::now();
for (token, info) in self.sockets.iter_mut() {
if let Some(inst) = info.timeout.take() {
if now > inst {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
error!("Can not register server socket {}", err);
} else {
info!("Resume accepting connections on {}", info.addr);
}
} else {
info.timeout = Some(inst);
}
}
}
}
fn process_cmd(&mut self) -> bool {
loop {
match self.rx.try_recv() {
Ok(cmd) => match cmd {
Command::Pause => {
for (_, info) in self.sockets.iter_mut() {
if let Err(err) = self.poll.deregister(&info.sock) {
error!("Can not deregister server socket {}", err);
} else {
info!("Paused accepting connections on {}", info.addr);
}
}
}
Command::Resume => {
for (token, info) in self.sockets.iter() {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
error!("Can not resume socket accept process: {}", err);
} else {
info!(
"Accepting connections on {} has been resumed",
info.addr
);
}
}
}
Command::Stop => {
for (_, info) in self.sockets.iter() {
let _ = self.poll.deregister(&info.sock);
}
return false;
}
Command::Worker(worker) => {
self.backpressure(false);
self.workers.push(worker);
}
},
Err(err) => match err {
sync_mpsc::TryRecvError::Empty => break,
sync_mpsc::TryRecvError::Disconnected => {
for (_, info) in self.sockets.iter() {
let _ = self.poll.deregister(&info.sock);
}
return false;
}
},
}
}
true
}
fn backpressure(&mut self, on: bool) {
if self.backpressure {
if !on {
self.backpressure = false;
for (token, info) in self.sockets.iter() {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
error!("Can not resume socket accept process: {}", err);
} else {
info!("Accepting connections on {} has been resumed", info.addr);
}
}
}
} else if on {
self.backpressure = true;
for (_, info) in self.sockets.iter() {
let _ = self.poll.deregister(&info.sock);
}
}
}
fn accept_one(&mut self, mut msg: Conn) {
if self.backpressure {
while !self.workers.is_empty() {
match self.workers[self.next].send(msg) {
Ok(_) => (),
Err(tmp) => {
self.srv.worker_died(self.workers[self.next].idx);
msg = tmp;
self.workers.swap_remove(self.next);
if self.workers.is_empty() {
error!("No workers");
return;
} else if self.workers.len() <= self.next {
self.next = 0;
}
continue;
}
}
self.next = (self.next + 1) % self.workers.len();
break;
}
} else {
let mut idx = 0;
while idx < self.workers.len() {
idx += 1;
if self.workers[self.next].available() {
match self.workers[self.next].send(msg) {
Ok(_) => {
self.next = (self.next + 1) % self.workers.len();
return;
}
Err(tmp) => {
self.srv.worker_died(self.workers[self.next].idx);
msg = tmp;
self.workers.swap_remove(self.next);
if self.workers.is_empty() {
error!("No workers");
self.backpressure(true);
return;
} else if self.workers.len() <= self.next {
self.next = 0;
}
continue;
}
}
}
self.next = (self.next + 1) % self.workers.len();
}
// enable backpressure
self.backpressure(true);
self.accept_one(msg);
}
}
fn accept(&mut self, token: usize) {
loop {
let msg = if let Some(info) = self.sockets.get_mut(token) {
match info.sock.accept_std() {
Ok((io, addr)) => Conn {
io,
token: info.token,
peer: Some(addr),
},
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return,
Err(ref e) if connection_error(e) => continue,
Err(e) => {
error!("Error accepting connection: {}", e);
if let Err(err) = self.poll.deregister(&info.sock) {
error!("Can not deregister server socket {}", err);
}
// sleep after error
info.timeout = Some(Instant::now() + Duration::from_millis(500));
let r = self.timer.1.clone();
System::current().arbiter().send(lazy(move || {
Delay::new(Instant::now() + Duration::from_millis(510))
.map_err(|_| ())
.and_then(move |_| {
let _ = r.set_readiness(mio::Ready::readable());
Ok(())
})
}));
return;
}
}
} else {
return;
};
self.accept_one(msg);
}
}
}

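The dispatch logic in `accept_one` above is easier to follow in isolation. Below is a minimal sketch of the same round-robin-with-eviction idea; `Worker` is a hypothetical stand-in, not the crate's `WorkerClient`:

// Illustrative sketch only (not part of the commit): round-robin dispatch
// that evicts dead workers, mirroring Accept::accept_one. Returns false
// when the caller should enable backpressure.
struct Worker;

impl Worker {
    fn available(&self) -> bool {
        true
    }
    // a dead worker hands the connection back so it can be retried
    fn send(&self, conn: u32) -> Result<(), u32> {
        let _ = conn;
        Ok(())
    }
}

fn dispatch(workers: &mut Vec<Worker>, next: &mut usize, mut conn: u32) -> bool {
    let mut attempts = 0;
    while attempts < workers.len() {
        attempts += 1;
        if workers[*next].available() {
            match workers[*next].send(conn) {
                Ok(()) => {
                    *next = (*next + 1) % workers.len();
                    return true;
                }
                Err(returned) => {
                    // worker died: evict it and retry the same connection
                    conn = returned;
                    workers.swap_remove(*next);
                    if workers.is_empty() {
                        return false;
                    }
                    if *next >= workers.len() {
                        *next = 0;
                    }
                    continue;
                }
            }
        }
        *next = (*next + 1) % workers.len();
    }
    false
}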
actix-server/src/builder.rs (new file)
@@ -0,0 +1,433 @@
use std::time::Duration;
use std::{io, mem, net};
use actix_rt::{spawn, Arbiter, System};
use futures::future::{lazy, ok};
use futures::stream::futures_unordered;
use futures::sync::mpsc::{unbounded, UnboundedReceiver};
use futures::{Async, Future, Poll, Stream};
use log::{error, info};
use net2::TcpBuilder;
use num_cpus;
use tokio_timer::sleep;
// use actix::{actors::signal};
use super::accept::{AcceptLoop, AcceptNotify, Command};
use super::config::{ConfiguredService, ServiceConfig};
use super::server::{Server, ServerCommand};
use super::services::{InternalServiceFactory, StreamNewService, StreamServiceFactory};
use super::services::{ServiceFactory, ServiceNewService};
use super::worker::{self, Worker, WorkerAvailability, WorkerClient};
use super::Token;
/// Server builder
pub struct ServerBuilder {
threads: usize,
token: Token,
workers: Vec<(usize, WorkerClient)>,
services: Vec<Box<InternalServiceFactory>>,
sockets: Vec<(Token, net::TcpListener)>,
accept: AcceptLoop,
exit: bool,
shutdown_timeout: Duration,
no_signals: bool,
cmd: UnboundedReceiver<ServerCommand>,
server: Server,
}
impl Default for ServerBuilder {
fn default() -> Self {
Self::new()
}
}
impl ServerBuilder {
/// Create new Server builder instance
pub fn new() -> ServerBuilder {
let (tx, rx) = unbounded();
let server = Server::new(tx);
ServerBuilder {
threads: num_cpus::get(),
token: Token(0),
workers: Vec::new(),
services: Vec::new(),
sockets: Vec::new(),
accept: AcceptLoop::new(server.clone()),
exit: false,
shutdown_timeout: Duration::from_secs(30),
no_signals: false,
cmd: rx,
server,
}
}
/// Set number of workers to start.
///
/// By default, the server uses the number of available logical CPUs as the
/// thread count.
pub fn workers(mut self, num: usize) -> Self {
self.threads = num;
self
}
/// Sets the maximum per-worker number of concurrent connections.
///
/// All socket listeners will stop accepting connections when this limit is
/// reached for each worker.
///
/// By default, the maximum is set to 25k connections per worker.
pub fn maxconn(self, num: usize) -> Self {
worker::max_concurrent_connections(num);
self
}
/// Stop actix system.
///
/// The `SystemExit` message stops the currently running system.
pub fn system_exit(mut self) -> Self {
self.exit = true;
self
}
/// Disable signal handling
pub fn disable_signals(mut self) -> Self {
self.no_signals = true;
self
}
/// Timeout for graceful workers shutdown in seconds.
///
/// After receiving a stop signal, workers have this much time to finish
/// serving requests. Workers still alive after the timeout are force
/// dropped.
///
/// By default, the shutdown timeout is set to 30 seconds.
pub fn shutdown_timeout(mut self, sec: u16) -> Self {
self.shutdown_timeout = Duration::from_secs(u64::from(sec));
self
}
/// Run external configuration as part of the server building
/// process
///
/// This function is useful for moving parts of configuration to a
/// different module or even library.
pub fn configure<F>(mut self, f: F) -> io::Result<ServerBuilder>
where
F: Fn(&mut ServiceConfig) -> io::Result<()>,
{
let mut cfg = ServiceConfig::new();
f(&mut cfg)?;
let mut srv = ConfiguredService::new(cfg.rt);
for (name, lst) in cfg.services {
let token = self.token.next();
srv.stream(token, name);
self.sockets.push((token, lst));
}
self.services.push(Box::new(srv));
Ok(self)
}
/// Add new service to the server.
pub fn bind<F, U, N: AsRef<str>>(mut self, name: N, addr: U, factory: F) -> io::Result<Self>
where
F: StreamServiceFactory,
U: net::ToSocketAddrs,
{
let sockets = bind_addr(addr)?;
let token = self.token.next();
self.services.push(StreamNewService::create(
name.as_ref().to_string(),
token,
factory,
));
for lst in sockets {
self.sockets.push((token, lst));
}
Ok(self)
}
/// Add new service to the server.
pub fn listen<F, N: AsRef<str>>(
mut self,
name: N,
lst: net::TcpListener,
factory: F,
) -> Self
where
F: StreamServiceFactory,
{
let token = self.token.next();
self.services.push(StreamNewService::create(
name.as_ref().to_string(),
token,
factory,
));
self.sockets.push((token, lst));
self
}
/// Add new service to the server.
pub fn listen2<F, N: AsRef<str>>(
mut self,
name: N,
lst: net::TcpListener,
factory: F,
) -> Self
where
F: ServiceFactory,
{
let token = self.token.next();
self.services.push(ServiceNewService::create(
name.as_ref().to_string(),
token,
factory,
));
self.sockets.push((token, lst));
self
}
/// Spawn new thread and start listening for incoming connections.
///
/// This method spawns new thread and starts new actix system. Other than
/// that it is similar to `start()` method. This method blocks.
///
/// This method panics if no socket addresses get bound.
///
/// ```rust,ignore
/// # extern crate futures;
/// # extern crate actix_web;
/// # use futures::Future;
/// use actix_web::*;
///
/// fn main() {
/// Server::new()
/// .service(
/// HttpServer::new(|| App::new().resource("/", |r| r.h(|_| HttpResponse::Ok())))
/// .bind("127.0.0.1:0")
/// .expect("Can not bind to 127.0.0.1:0"))
/// .run();
/// }
/// ```
pub fn run(self) {
let sys = System::new("http-server");
self.start();
sys.run();
}
/// Starts processing incoming connections and return server controller.
pub fn start(mut self) -> Server {
if self.sockets.is_empty() {
panic!("Server should have at least one bound socket");
} else {
info!("Starting {} workers", self.threads);
// start workers
let mut workers = Vec::new();
for idx in 0..self.threads {
let worker = self.start_worker(idx, self.accept.get_notify());
workers.push(worker.clone());
self.workers.push((idx, worker));
}
// start accept thread
for sock in &self.sockets {
info!("Starting server on {}", sock.1.local_addr().ok().unwrap());
}
self.accept
.start(mem::replace(&mut self.sockets, Vec::new()), workers);
// start http server actor
// let signals = self.subscribe_to_signals();
// if let Some(signals) = signals {
// signals.do_send(signal::Subscribe(addr.clone().recipient()))
// }
let server = self.server.clone();
spawn(self);
server
}
}
fn start_worker(&self, idx: usize, notify: AcceptNotify) -> WorkerClient {
let (tx1, rx1) = unbounded();
let (tx2, rx2) = unbounded();
let timeout = self.shutdown_timeout;
let avail = WorkerAvailability::new(notify);
let worker = WorkerClient::new(idx, tx1, tx2, avail.clone());
let services: Vec<Box<InternalServiceFactory>> =
self.services.iter().map(|v| v.clone_factory()).collect();
Arbiter::new().send(lazy(move || {
Worker::start(rx1, rx2, services, avail, timeout);
Ok::<_, ()>(())
}));
worker
}
}
// /// Signals support
// /// Handle `SIGINT`, `SIGTERM`, `SIGQUIT` signals and stop actix system
// /// message to `System` actor.
// impl Handler<signal::Signal> for Server {
// type Result = ();
// fn handle(&mut self, msg: signal::Signal, ctx: &mut Context<Self>) {
// match msg.0 {
// signal::SignalType::Int => {
// info!("SIGINT received, exiting");
// self.exit = true;
// Handler::<StopServer>::handle(self, StopServer { graceful: false }, ctx);
// }
// signal::SignalType::Term => {
// info!("SIGTERM received, stopping");
// self.exit = true;
// Handler::<StopServer>::handle(self, StopServer { graceful: true }, ctx);
// }
// signal::SignalType::Quit => {
// info!("SIGQUIT received, exiting");
// self.exit = true;
// Handler::<StopServer>::handle(self, StopServer { graceful: false }, ctx);
// }
// _ => (),
// }
// }
// }
impl Future for ServerBuilder {
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
match self.cmd.poll() {
Ok(Async::Ready(None)) | Err(_) => return Ok(Async::Ready(())),
Ok(Async::NotReady) => return Ok(Async::NotReady),
Ok(Async::Ready(Some(item))) => match item {
ServerCommand::Pause(tx) => {
self.accept.send(Command::Pause);
let _ = tx.send(());
}
ServerCommand::Resume(tx) => {
self.accept.send(Command::Resume);
let _ = tx.send(());
}
ServerCommand::Stop {
graceful,
completion,
} => {
let exit = self.exit;
// stop accept thread
self.accept.send(Command::Stop);
// stop workers
if !self.workers.is_empty() {
spawn(
futures_unordered(
self.workers
.iter()
.map(move |worker| worker.1.stop(graceful)),
)
.collect()
.then(move |_| {
let _ = completion.send(());
if exit {
spawn(sleep(Duration::from_millis(300)).then(|_| {
System::current().stop();
ok(())
}));
}
ok(())
}),
)
} else {
// we need to stop system if server was spawned
if self.exit {
spawn(sleep(Duration::from_millis(300)).then(|_| {
System::current().stop();
ok(())
}));
}
let _ = completion.send(());
}
}
ServerCommand::WorkerDied(idx) => {
let mut found = false;
for i in 0..self.workers.len() {
if self.workers[i].0 == idx {
self.workers.swap_remove(i);
found = true;
break;
}
}
if found {
error!("Worker has died {:?}, restarting", idx);
let mut new_idx = self.workers.len();
'found: loop {
for i in 0..self.workers.len() {
if self.workers[i].0 == new_idx {
new_idx += 1;
continue 'found;
}
}
break;
}
let worker = self.start_worker(new_idx, self.accept.get_notify());
self.workers.push((new_idx, worker.clone()));
self.accept.send(Command::Worker(worker));
}
}
},
}
}
}
}
pub(super) fn bind_addr<S: net::ToSocketAddrs>(addr: S) -> io::Result<Vec<net::TcpListener>> {
let mut err = None;
let mut succ = false;
let mut sockets = Vec::new();
for addr in addr.to_socket_addrs()? {
match create_tcp_listener(addr) {
Ok(lst) => {
succ = true;
sockets.push(lst);
}
Err(e) => err = Some(e),
}
}
if !succ {
if let Some(e) = err.take() {
Err(e)
} else {
Err(io::Error::new(
io::ErrorKind::Other,
"Can not bind to address.",
))
}
} else {
Ok(sockets)
}
}
fn create_tcp_listener(addr: net::SocketAddr) -> io::Result<net::TcpListener> {
let builder = match addr {
net::SocketAddr::V4(_) => TcpBuilder::new_v4()?,
net::SocketAddr::V6(_) => TcpBuilder::new_v6()?,
};
builder.reuse_address(true)?;
builder.bind(addr)?;
Ok(builder.listen(1024)?)
}

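For orientation, here is a minimal sketch of driving this builder from a binary. It assumes actix-service converts plain closures into a `NewService` via `IntoNewService` (the bound on `ServiceRuntime::service` in config.rs suggests as much); the address and the service body are placeholders:

use actix_server::Server;
use actix_service::IntoNewService;
use futures::future;
use tokio_tcp::TcpStream;

fn main() {
    Server::build()
        .workers(4)
        // the factory closure is a StreamServiceFactory: Fn() -> NewService<TcpStream>
        .bind("echo", "127.0.0.1:8080", || {
            // assumed: IntoNewService turns this closure into a
            // NewService<TcpStream, Response = ()>
            (|_io: TcpStream| future::ok::<(), ()>(())).into_new_service()
        })
        .expect("can not bind to 127.0.0.1:8080")
        .run(); // starts a System, spawns the workers, and blocks
}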
actix-server/src/config.rs (new file)
@@ -0,0 +1,213 @@
use std::collections::HashMap;
use std::{fmt, io, net};
use actix_service::{IntoNewService, NewService};
use futures::future::{join_all, Future};
use log::error;
use tokio_tcp::TcpStream;
use crate::counter::CounterGuard;
use super::builder::bind_addr;
use super::services::{
BoxedServerService, InternalServiceFactory, ServerMessage, StreamService,
};
use super::Token;
pub struct ServiceConfig {
pub(super) services: Vec<(String, net::TcpListener)>,
pub(super) rt: Box<ServiceRuntimeConfiguration>,
}
impl ServiceConfig {
pub(super) fn new() -> ServiceConfig {
ServiceConfig {
services: Vec::new(),
rt: Box::new(not_configured),
}
}
/// Add new service to server
pub fn bind<U, N: AsRef<str>>(&mut self, name: N, addr: U) -> io::Result<&mut Self>
where
U: net::ToSocketAddrs,
{
let sockets = bind_addr(addr)?;
for lst in sockets {
self.listen(name.as_ref(), lst);
}
Ok(self)
}
/// Add new service to server
pub fn listen<N: AsRef<str>>(&mut self, name: N, lst: net::TcpListener) -> &mut Self {
self.services.push((name.as_ref().to_string(), lst));
self
}
/// Register service configuration function
pub fn rt<F>(&mut self, f: F) -> io::Result<()>
where
F: Fn(&mut ServiceRuntime) + Send + Clone + 'static,
{
self.rt = Box::new(f);
Ok(())
}
}
pub(super) struct ConfiguredService {
rt: Box<ServiceRuntimeConfiguration>,
names: HashMap<Token, String>,
services: HashMap<String, Token>,
}
impl ConfiguredService {
pub(super) fn new(rt: Box<ServiceRuntimeConfiguration>) -> Self {
ConfiguredService {
rt,
names: HashMap::new(),
services: HashMap::new(),
}
}
pub(super) fn stream(&mut self, token: Token, name: String) {
self.names.insert(token, name.clone());
self.services.insert(name, token);
}
}
impl InternalServiceFactory for ConfiguredService {
fn name(&self, token: Token) -> &str {
&self.names[&token]
}
fn clone_factory(&self) -> Box<InternalServiceFactory> {
Box::new(Self {
rt: self.rt.clone(),
names: self.names.clone(),
services: self.services.clone(),
})
}
fn create(&self) -> Box<Future<Item = Vec<(Token, BoxedServerService)>, Error = ()>> {
// configure services
let mut rt = ServiceRuntime::new(self.services.clone());
self.rt.configure(&mut rt);
rt.validate();
// construct services
let mut fut = Vec::new();
for (token, ns) in rt.services {
fut.push(ns.new_service().map(move |service| (token, service)));
}
Box::new(join_all(fut).map_err(|e| {
error!("Can not construct service: {:?}", e);
}))
}
}
pub(super) trait ServiceRuntimeConfiguration: Send {
fn clone(&self) -> Box<ServiceRuntimeConfiguration>;
fn configure(&self, rt: &mut ServiceRuntime);
}
impl<F> ServiceRuntimeConfiguration for F
where
F: Fn(&mut ServiceRuntime) + Send + Clone + 'static,
{
fn clone(&self) -> Box<ServiceRuntimeConfiguration> {
Box::new(self.clone())
}
fn configure(&self, rt: &mut ServiceRuntime) {
(self)(rt)
}
}
fn not_configured(_: &mut ServiceRuntime) {
error!("Service is not configured");
}
pub struct ServiceRuntime {
names: HashMap<String, Token>,
services: HashMap<Token, BoxedNewService>,
}
impl ServiceRuntime {
fn new(names: HashMap<String, Token>) -> Self {
ServiceRuntime {
names,
services: HashMap::new(),
}
}
fn validate(&self) {
for (name, token) in &self.names {
if !self.services.contains_key(&token) {
error!("Service {:?} is not configured", name);
}
}
}
pub fn service<T, F>(&mut self, name: &str, service: F)
where
F: IntoNewService<T, TcpStream>,
T: NewService<TcpStream, Response = ()> + 'static,
T::Future: 'static,
T::Service: 'static,
T::InitError: fmt::Debug,
{
// let name = name.to_owned();
if let Some(token) = self.names.get(name) {
self.services.insert(
token.clone(),
Box::new(ServiceFactory {
inner: service.into_new_service(),
}),
);
} else {
panic!("Unknown service: {:?}", name);
}
}
}
type BoxedNewService = Box<
NewService<
(Option<CounterGuard>, ServerMessage),
Response = (),
Error = (),
InitError = (),
Service = BoxedServerService,
Future = Box<Future<Item = BoxedServerService, Error = ()>>,
>,
>;
struct ServiceFactory<T> {
inner: T,
}
impl<T> NewService<(Option<CounterGuard>, ServerMessage)> for ServiceFactory<T>
where
T: NewService<TcpStream, Response = ()>,
T::Future: 'static,
T::Service: 'static,
T::Error: 'static,
T::InitError: fmt::Debug + 'static,
{
type Response = ();
type Error = ();
type InitError = ();
type Service = BoxedServerService;
type Future = Box<Future<Item = BoxedServerService, Error = ()>>;
fn new_service(&self) -> Self::Future {
Box::new(self.inner.new_service().map_err(|_| ()).map(|s| {
let service: BoxedServerService = Box::new(StreamService::new(s));
service
}))
}
}

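A sketch of the two-phase configuration this module enables: sockets are bound under a name first, and services are attached to those names inside the `rt` callback. `my_service()` is hypothetical, standing in for any `NewService<TcpStream, Response = ()>` value; an unknown name passed to `rt.service` panics, while a bound name that never receives a service only logs an error during `validate`.

use std::io;
use actix_server::{Server, ServiceConfig};

fn config(cfg: &mut ServiceConfig) -> io::Result<()> {
    cfg.bind("srv1", "127.0.0.1:8081")?; // name the socket first...
    cfg.rt(|rt| {
        // ...then attach a service to that name at runtime;
        // my_service() is a hypothetical factory
        rt.service("srv1", my_service());
    })
}

fn main() {
    Server::build()
        .configure(config)
        .expect("configuration failed")
        .run();
}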
actix-server/src/counter.rs (new file)
@@ -0,0 +1,78 @@
use std::cell::Cell;
use std::rc::Rc;
use futures::task::AtomicTask;
#[derive(Clone)]
/// Simple counter with the ability to notify a task on reaching a specific
/// number.
///
/// The counter can be cloned; the total count is shared across all clones.
pub struct Counter(Rc<CounterInner>);
struct CounterInner {
count: Cell<usize>,
capacity: usize,
task: AtomicTask,
}
impl Counter {
/// Create `Counter` instance and set max value.
pub fn new(capacity: usize) -> Self {
Counter(Rc::new(CounterInner {
capacity,
count: Cell::new(0),
task: AtomicTask::new(),
}))
}
pub fn get(&self) -> CounterGuard {
CounterGuard::new(self.0.clone())
}
/// Check if counter is not at capacity
pub fn available(&self) -> bool {
self.0.available()
}
/// Get total number of acquired counts
pub fn total(&self) -> usize {
self.0.count.get()
}
}
pub struct CounterGuard(Rc<CounterInner>);
impl CounterGuard {
fn new(inner: Rc<CounterInner>) -> Self {
inner.inc();
CounterGuard(inner)
}
}
impl Drop for CounterGuard {
fn drop(&mut self) {
self.0.dec();
}
}
impl CounterInner {
fn inc(&self) {
let num = self.count.get() + 1;
self.count.set(num);
if num == self.capacity {
self.task.register();
}
}
fn dec(&self) {
let num = self.count.get();
self.count.set(num - 1);
if num == self.capacity {
self.task.notify();
}
}
fn available(&self) -> bool {
self.count.get() < self.capacity
}
}

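The counter's contract in miniature: guards increment on creation and decrement on drop, and the task registered at capacity is notified once a slot frees up. Illustrative only, since `Counter` is crate-private:

// e.g. as a #[test] inside counter.rs
fn counter_contract() {
    let counter = Counter::new(2);
    let g1 = counter.get(); // total() == 1
    let _g2 = counter.get(); // total() == 2: at capacity, task registered
    assert!(!counter.available());
    drop(g1); // dropping back below capacity notifies the registered task
    assert!(counter.available());
}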
actix-server/src/lib.rs (new file)
@@ -0,0 +1,27 @@
//! General purpose tcp server
mod accept;
mod builder;
mod config;
mod counter;
mod server;
mod services;
pub mod ssl;
mod worker;
pub use self::builder::ServerBuilder;
pub use self::config::{ServiceConfig, ServiceRuntime};
pub use self::server::Server;
pub use self::services::{ServerMessage, ServiceFactory, StreamServiceFactory};
/// Socket id token
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub(crate) struct Token(usize);
impl Token {
pub(crate) fn next(&mut self) -> Token {
let token = Token(self.0 + 1);
self.0 += 1;
token
}
}

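One subtlety: `next()` both advances the counter and returns the new value, so the builder, which starts at `Token(0)`, hands out `Token(1)` for the first service. Illustrative only, since `Token` is `pub(crate)`:

fn token_sequence() {
    let mut token = Token(0);
    assert_eq!(token.next(), Token(1)); // first token assigned by the builder
    assert_eq!(token.next(), Token(2));
}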
actix-server/src/server.rs (new file)
@@ -0,0 +1,63 @@
use futures::sync::mpsc::UnboundedSender;
use futures::sync::oneshot;
use futures::Future;
use super::builder::ServerBuilder;
pub(crate) enum ServerCommand {
WorkerDied(usize),
Pause(oneshot::Sender<()>),
Resume(oneshot::Sender<()>),
/// Whether to try and shut down gracefully
Stop {
graceful: bool,
completion: oneshot::Sender<()>,
},
}
#[derive(Clone)]
pub struct Server(UnboundedSender<ServerCommand>);
impl Server {
pub(crate) fn new(tx: UnboundedSender<ServerCommand>) -> Self {
Server(tx)
}
/// Start server building process
pub fn build() -> ServerBuilder {
ServerBuilder::default()
}
pub(crate) fn worker_died(&self, idx: usize) {
let _ = self.0.unbounded_send(ServerCommand::WorkerDied(idx));
}
/// Pause accepting incoming connections
///
/// If the socket contains pending connections, they might be dropped.
/// All open connections remain active.
pub fn pause(&self) -> impl Future<Item = (), Error = ()> {
let (tx, rx) = oneshot::channel();
let _ = self.0.unbounded_send(ServerCommand::Pause(tx));
rx.map_err(|_| ())
}
/// Resume accepting incoming connections
pub fn resume(&self) -> impl Future<Item = (), Error = ()> {
let (tx, rx) = oneshot::channel();
let _ = self.0.unbounded_send(ServerCommand::Resume(tx));
rx.map_err(|_| ())
}
/// Stop incoming connection processing, stop all workers and exit.
///
/// If the server was started with the `spawn()` method, the spawned thread gets terminated.
pub fn stop(&self, graceful: bool) -> impl Future<Item = (), Error = ()> {
let (tx, rx) = oneshot::channel();
let _ = self.0.unbounded_send(ServerCommand::Stop {
graceful,
completion: tx,
});
rx.map_err(|_| ())
}
}

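The handle returned by `ServerBuilder::start()` is driven by spawning its futures onto the running system; a minimal sketch:

use actix_rt::spawn;

// `srv` is the Server handle returned by ServerBuilder::start()
fn pause_then_stop(srv: &actix_server::Server) {
    spawn(srv.pause()); // stop accepting new connections
    spawn(srv.stop(true)); // graceful: workers drain within the shutdown timeout
}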
actix-server/src/services.rs (new file)
@@ -0,0 +1,261 @@
use std::net;
use std::time::Duration;
use actix_rt::spawn;
use actix_service::{NewService, Service};
use futures::future::{err, ok, FutureResult};
use futures::{Future, Poll};
use log::error;
use tokio_reactor::Handle;
use tokio_tcp::TcpStream;
use super::Token;
use crate::counter::CounterGuard;
/// Server message
pub enum ServerMessage {
/// New stream
Connect(net::TcpStream),
/// Graceful shutdown
Shutdown(Duration),
/// Force shutdown
ForceShutdown,
}
pub trait StreamServiceFactory: Send + Clone + 'static {
type NewService: NewService<TcpStream, Response = ()>;
fn create(&self) -> Self::NewService;
}
pub trait ServiceFactory: Send + Clone + 'static {
type NewService: NewService<ServerMessage, Response = ()>;
fn create(&self) -> Self::NewService;
}
pub(crate) trait InternalServiceFactory: Send {
fn name(&self, token: Token) -> &str;
fn clone_factory(&self) -> Box<InternalServiceFactory>;
fn create(&self) -> Box<Future<Item = Vec<(Token, BoxedServerService)>, Error = ()>>;
}
pub(crate) type BoxedServerService = Box<
Service<
(Option<CounterGuard>, ServerMessage),
Response = (),
Error = (),
Future = FutureResult<(), ()>,
>,
>;
pub(crate) struct StreamService<T> {
service: T,
}
impl<T> StreamService<T> {
pub(crate) fn new(service: T) -> Self {
StreamService { service }
}
}
impl<T> Service<(Option<CounterGuard>, ServerMessage)> for StreamService<T>
where
T: Service<TcpStream, Response = ()>,
T::Future: 'static,
T::Error: 'static,
{
type Response = ();
type Error = ();
type Future = FutureResult<(), ()>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.service.poll_ready().map_err(|_| ())
}
fn call(&mut self, (guard, req): (Option<CounterGuard>, ServerMessage)) -> Self::Future {
match req {
ServerMessage::Connect(stream) => {
let stream = TcpStream::from_std(stream, &Handle::default()).map_err(|e| {
error!("Can not convert to an async tcp stream: {}", e);
});
if let Ok(stream) = stream {
spawn(self.service.call(stream).map_err(|_| ()).map(move |val| {
drop(guard);
val
}));
ok(())
} else {
err(())
}
}
_ => ok(()),
}
}
}
pub(crate) struct ServerService<T> {
service: T,
}
impl<T> ServerService<T> {
fn new(service: T) -> Self {
ServerService { service }
}
}
impl<T> Service<(Option<CounterGuard>, ServerMessage)> for ServerService<T>
where
T: Service<ServerMessage, Response = ()>,
T::Future: 'static,
T::Error: 'static,
{
type Response = ();
type Error = ();
type Future = FutureResult<(), ()>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.service.poll_ready().map_err(|_| ())
}
fn call(&mut self, (guard, req): (Option<CounterGuard>, ServerMessage)) -> Self::Future {
spawn(self.service.call(req).map_err(|_| ()).map(move |val| {
drop(guard);
val
}));
ok(())
}
}
pub(crate) struct ServiceNewService<F: ServiceFactory> {
name: String,
inner: F,
token: Token,
}
impl<F> ServiceNewService<F>
where
F: ServiceFactory,
{
pub(crate) fn create(name: String, token: Token, inner: F) -> Box<InternalServiceFactory> {
Box::new(Self { name, inner, token })
}
}
impl<F> InternalServiceFactory for ServiceNewService<F>
where
F: ServiceFactory,
{
fn name(&self, _: Token) -> &str {
&self.name
}
fn clone_factory(&self) -> Box<InternalServiceFactory> {
Box::new(Self {
name: self.name.clone(),
inner: self.inner.clone(),
token: self.token,
})
}
fn create(&self) -> Box<Future<Item = Vec<(Token, BoxedServerService)>, Error = ()>> {
let token = self.token;
Box::new(
self.inner
.create()
.new_service()
.map_err(|_| ())
.map(move |inner| {
let service: BoxedServerService = Box::new(ServerService::new(inner));
vec![(token, service)]
}),
)
}
}
pub(crate) struct StreamNewService<F: StreamServiceFactory> {
name: String,
inner: F,
token: Token,
}
impl<F> StreamNewService<F>
where
F: StreamServiceFactory,
{
pub(crate) fn create(name: String, token: Token, inner: F) -> Box<InternalServiceFactory> {
Box::new(Self { name, token, inner })
}
}
impl<F> InternalServiceFactory for StreamNewService<F>
where
F: StreamServiceFactory,
{
fn name(&self, _: Token) -> &str {
&self.name
}
fn clone_factory(&self) -> Box<InternalServiceFactory> {
Box::new(Self {
name: self.name.clone(),
inner: self.inner.clone(),
token: self.token,
})
}
fn create(&self) -> Box<Future<Item = Vec<(Token, BoxedServerService)>, Error = ()>> {
let token = self.token;
Box::new(
self.inner
.create()
.new_service()
.map_err(|_| ())
.map(move |inner| {
let service: BoxedServerService = Box::new(StreamService::new(inner));
vec![(token, service)]
}),
)
}
}
impl InternalServiceFactory for Box<InternalServiceFactory> {
fn name(&self, token: Token) -> &str {
self.as_ref().name(token)
}
fn clone_factory(&self) -> Box<InternalServiceFactory> {
self.as_ref().clone_factory()
}
fn create(&self) -> Box<Future<Item = Vec<(Token, BoxedServerService)>, Error = ()>> {
self.as_ref().create()
}
}
impl<F, T> ServiceFactory for F
where
F: Fn() -> T + Send + Clone + 'static,
T: NewService<ServerMessage, Response = ()>,
{
type NewService = T;
fn create(&self) -> T {
(self)()
}
}
impl<F, T> StreamServiceFactory for F
where
F: Fn() -> T + Send + Clone + 'static,
T: NewService<TcpStream, Response = ()>,
{
type NewService = T;
fn create(&self) -> T {
(self)()
}
}

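To make the factory traits concrete: the blanket impls above mean any `Fn() -> T` closure is a `StreamServiceFactory` once `T` implements `NewService<TcpStream, Response = ()>`. A minimal hand-written implementation, sketched using only the trait shapes visible in this commit:

use actix_service::{NewService, Service};
use futures::future::{ok, FutureResult};
use futures::{Async, Poll};
use tokio_tcp::TcpStream;

struct Echo;

impl Service<TcpStream> for Echo {
    type Response = ();
    type Error = ();
    type Future = FutureResult<(), ()>;

    fn poll_ready(&mut self) -> Poll<(), Self::Error> {
        Ok(Async::Ready(())) // always ready in this sketch
    }

    fn call(&mut self, _stream: TcpStream) -> Self::Future {
        ok(()) // a real service would read from / write to the stream
    }
}

struct EchoFactory;

impl NewService<TcpStream> for EchoFactory {
    type Response = ();
    type Error = ();
    type InitError = ();
    type Service = Echo;
    type Future = FutureResult<Echo, ()>;

    fn new_service(&self) -> Self::Future {
        ok(Echo)
    }
}

// `|| EchoFactory` now satisfies StreamServiceFactory and can be passed to
// ServerBuilder::bind or listen.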
actix-server/src/ssl/mod.rs (new file)
@@ -0,0 +1,35 @@
//! SSL Services
use std::sync::atomic::{AtomicUsize, Ordering};
use crate::counter::Counter;
#[cfg(feature = "ssl")]
mod openssl;
#[cfg(feature = "ssl")]
pub use self::openssl::OpensslAcceptor;
#[cfg(feature = "tls")]
mod nativetls;
#[cfg(feature = "tls")]
pub use self::nativetls::{NativeTlsAcceptor, TlsStream};
#[cfg(feature = "rust-tls")]
mod rustls;
#[cfg(feature = "rust-tls")]
pub use self::rustls::RustlsAcceptor;
/// Sets the maximum number of concurrent SSL handshakes per worker.
///
/// All listeners will stop accepting connections when this limit is
/// reached. It can be used to limit the global SSL CPU usage.
///
/// By default, the maximum is set to 256.
pub fn max_concurrent_ssl_connect(num: usize) {
MAX_CONN.store(num, Ordering::Relaxed);
}
pub(crate) static MAX_CONN: AtomicUsize = AtomicUsize::new(256);
thread_local! {
static MAX_CONN_COUNTER: Counter = Counter::new(MAX_CONN.load(Ordering::Relaxed));
}

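Because the per-worker counter is a lazily initialized thread-local, the cap should be raised before the server starts its worker threads; a one-line sketch:

// call before Server::build()...start() so each worker thread initializes
// its thread-local counter with the new value
actix_server::ssl::max_concurrent_ssl_connect(512);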
actix-server/src/ssl/nativetls.rs (new file)
@@ -0,0 +1,164 @@
use std::io;
use std::marker::PhantomData;
use actix_service::{NewService, Service};
use futures::{future::ok, future::FutureResult, Async, Future, Poll};
use native_tls::{self, Error, HandshakeError, TlsAcceptor};
use tokio_io::{AsyncRead, AsyncWrite};
use super::MAX_CONN_COUNTER;
use crate::counter::{Counter, CounterGuard};
/// Support `SSL` connections via native-tls package
///
/// `tls` feature enables `NativeTlsAcceptor` type
pub struct NativeTlsAcceptor<T> {
acceptor: TlsAcceptor,
io: PhantomData<T>,
}
impl<T: AsyncRead + AsyncWrite> NativeTlsAcceptor<T> {
/// Create `NativeTlsAcceptor` instance
pub fn new(acceptor: TlsAcceptor) -> Self {
NativeTlsAcceptor {
acceptor,
io: PhantomData,
}
}
}
impl<T: AsyncRead + AsyncWrite> Clone for NativeTlsAcceptor<T> {
fn clone(&self) -> Self {
Self {
acceptor: self.acceptor.clone(),
io: PhantomData,
}
}
}
impl<T: AsyncRead + AsyncWrite> NewService<T> for NativeTlsAcceptor<T> {
type Response = TlsStream<T>;
type Error = Error;
type Service = NativeTlsAcceptorService<T>;
type InitError = ();
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self) -> Self::Future {
MAX_CONN_COUNTER.with(|conns| {
ok(NativeTlsAcceptorService {
acceptor: self.acceptor.clone(),
conns: conns.clone(),
io: PhantomData,
})
})
}
}
pub struct NativeTlsAcceptorService<T> {
acceptor: TlsAcceptor,
io: PhantomData<T>,
conns: Counter,
}
impl<T: AsyncRead + AsyncWrite> Service<T> for NativeTlsAcceptorService<T> {
type Response = TlsStream<T>;
type Error = Error;
type Future = Accept<T>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
if self.conns.available() {
Ok(Async::Ready(()))
} else {
Ok(Async::NotReady)
}
}
fn call(&mut self, req: T) -> Self::Future {
Accept {
_guard: self.conns.get(),
inner: Some(self.acceptor.accept(req)),
}
}
}
/// A wrapper around an underlying raw stream which implements the TLS or SSL
/// protocol.
///
/// A `TlsStream<S>` represents a handshake that has been completed successfully
/// and both the server and the client are ready for receiving and sending
/// data. Bytes read from a `TlsStream` are decrypted from `S` and bytes written
/// to a `TlsStream` are encrypted when passing through to `S`.
#[derive(Debug)]
pub struct TlsStream<S> {
inner: native_tls::TlsStream<S>,
}
/// Future returned by the acceptor service, resolving once the TLS accept
/// handshake has finished.
pub struct Accept<S> {
inner: Option<Result<native_tls::TlsStream<S>, HandshakeError<S>>>,
_guard: CounterGuard,
}
impl<Io: AsyncRead + AsyncWrite> Future for Accept<Io> {
type Item = TlsStream<Io>;
type Error = Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.inner.take().expect("cannot poll MidHandshake twice") {
Ok(stream) => Ok(TlsStream { inner: stream }.into()),
Err(HandshakeError::Failure(e)) => Err(e),
Err(HandshakeError::WouldBlock(s)) => match s.handshake() {
Ok(stream) => Ok(TlsStream { inner: stream }.into()),
Err(HandshakeError::Failure(e)) => Err(e),
Err(HandshakeError::WouldBlock(s)) => {
self.inner = Some(Err(HandshakeError::WouldBlock(s)));
Ok(Async::NotReady)
}
},
}
}
}
impl<S> TlsStream<S> {
/// Get access to the internal `native_tls::TlsStream` stream which also
/// transitively allows access to `S`.
pub fn get_ref(&self) -> &native_tls::TlsStream<S> {
&self.inner
}
/// Get mutable access to the internal `native_tls::TlsStream` stream which
/// also transitively allows mutable access to `S`.
pub fn get_mut(&mut self) -> &mut native_tls::TlsStream<S> {
&mut self.inner
}
}
impl<S: io::Read + io::Write> io::Read for TlsStream<S> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.inner.read(buf)
}
}
impl<S: io::Read + io::Write> io::Write for TlsStream<S> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.inner.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
impl<S: AsyncRead + AsyncWrite> AsyncRead for TlsStream<S> {}
impl<S: AsyncRead + AsyncWrite> AsyncWrite for TlsStream<S> {
fn shutdown(&mut self) -> Poll<(), io::Error> {
match self.inner.shutdown() {
Ok(_) => (),
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => (),
Err(e) => return Err(e),
}
self.inner.get_mut().shutdown()
}
}

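Constructing this acceptor from a PKCS#12 bundle might look like the following (a sketch against the native-tls 0.2 API; requires the `tls` feature, and the inputs are placeholders):

use native_tls::{Identity, TlsAcceptor};

fn tls_acceptor(
    pkcs12: &[u8],
    password: &str,
) -> actix_server::ssl::NativeTlsAcceptor<tokio_tcp::TcpStream> {
    let identity = Identity::from_pkcs12(pkcs12, password).expect("invalid pkcs12");
    let acceptor = TlsAcceptor::new(identity).expect("can not build TlsAcceptor");
    actix_server::ssl::NativeTlsAcceptor::new(acceptor)
}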
actix-server/src/ssl/openssl.rs (new file)
@@ -0,0 +1,99 @@
use std::marker::PhantomData;
use actix_service::{NewService, Service};
use futures::{future::ok, future::FutureResult, Async, Future, Poll};
use openssl::ssl::{HandshakeError, SslAcceptor};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_openssl::{AcceptAsync, SslAcceptorExt, SslStream};
use super::MAX_CONN_COUNTER;
use crate::counter::{Counter, CounterGuard};
/// Support `SSL` connections via openssl package
///
/// `ssl` feature enables `OpensslAcceptor` type
pub struct OpensslAcceptor<T> {
acceptor: SslAcceptor,
io: PhantomData<T>,
}
impl<T> OpensslAcceptor<T> {
/// Create default `OpensslAcceptor`
pub fn new(acceptor: SslAcceptor) -> Self {
OpensslAcceptor {
acceptor,
io: PhantomData,
}
}
}
impl<T: AsyncRead + AsyncWrite> Clone for OpensslAcceptor<T> {
fn clone(&self) -> Self {
Self {
acceptor: self.acceptor.clone(),
io: PhantomData,
}
}
}
impl<T: AsyncRead + AsyncWrite> NewService<T> for OpensslAcceptor<T> {
type Response = SslStream<T>;
type Error = HandshakeError<T>;
type Service = OpensslAcceptorService<T>;
type InitError = ();
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self) -> Self::Future {
MAX_CONN_COUNTER.with(|conns| {
ok(OpensslAcceptorService {
acceptor: self.acceptor.clone(),
conns: conns.clone(),
io: PhantomData,
})
})
}
}
pub struct OpensslAcceptorService<T> {
acceptor: SslAcceptor,
io: PhantomData<T>,
conns: Counter,
}
impl<T: AsyncRead + AsyncWrite> Service<T> for OpensslAcceptorService<T> {
type Response = SslStream<T>;
type Error = HandshakeError<T>;
type Future = OpensslAcceptorServiceFut<T>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
if self.conns.available() {
Ok(Async::Ready(()))
} else {
Ok(Async::NotReady)
}
}
fn call(&mut self, req: T) -> Self::Future {
OpensslAcceptorServiceFut {
_guard: self.conns.get(),
fut: SslAcceptorExt::accept_async(&self.acceptor, req),
}
}
}
pub struct OpensslAcceptorServiceFut<T>
where
T: AsyncRead + AsyncWrite,
{
fut: AcceptAsync<T>,
_guard: CounterGuard,
}
impl<T: AsyncRead + AsyncWrite> Future for OpensslAcceptorServiceFut<T> {
type Item = SslStream<T>;
type Error = HandshakeError<T>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
self.fut.poll()
}
}

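With the `ssl` feature, a typical construction path goes through openssl 0.10's builder (a sketch; the file paths are placeholders):

use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod};

fn openssl_acceptor() -> actix_server::ssl::OpensslAcceptor<tokio_tcp::TcpStream> {
    let mut builder =
        SslAcceptor::mozilla_intermediate(SslMethod::tls()).expect("acceptor builder");
    builder
        .set_private_key_file("key.pem", SslFiletype::PEM)
        .expect("private key");
    builder
        .set_certificate_chain_file("cert.pem")
        .expect("certificate chain");
    actix_server::ssl::OpensslAcceptor::new(builder.build())
}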
actix-server/src/ssl/rustls.rs (new file)
@@ -0,0 +1,101 @@
use std::io;
use std::marker::PhantomData;
use std::sync::Arc;
use actix_service::{NewService, Service};
use futures::{future::ok, future::FutureResult, Async, Future, Poll};
use rustls::{ServerConfig, ServerSession};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_rustls::{Accept, TlsAcceptor, TlsStream};
use super::MAX_CONN_COUNTER;
use crate::counter::{Counter, CounterGuard};
/// Support `SSL` connections via rustls package
///
/// `rust-tls` feature enables `RustlsAcceptor` type
pub struct RustlsAcceptor<T> {
config: Arc<ServerConfig>,
io: PhantomData<T>,
}
impl<T: AsyncRead + AsyncWrite> RustlsAcceptor<T> {
/// Create `RustlsAcceptor` new service
pub fn new(config: ServerConfig) -> Self {
RustlsAcceptor {
config: Arc::new(config),
io: PhantomData,
}
}
}
impl<T> Clone for RustlsAcceptor<T> {
fn clone(&self) -> Self {
Self {
config: self.config.clone(),
io: PhantomData,
}
}
}
impl<T: AsyncRead + AsyncWrite> NewService<T> for RustlsAcceptor<T> {
type Response = TlsStream<T, ServerSession>;
type Error = io::Error;
type Service = RustlsAcceptorService<T>;
type InitError = ();
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self) -> Self::Future {
MAX_CONN_COUNTER.with(|conns| {
ok(RustlsAcceptorService {
acceptor: self.config.clone().into(),
conns: conns.clone(),
io: PhantomData,
})
})
}
}
pub struct RustlsAcceptorService<T> {
acceptor: TlsAcceptor,
io: PhantomData<T>,
conns: Counter,
}
impl<T: AsyncRead + AsyncWrite> Service<T> for RustlsAcceptorService<T> {
type Response = TlsStream<T, ServerSession>;
type Error = io::Error;
type Future = RustlsAcceptorServiceFut<T>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
if self.conns.available() {
Ok(Async::Ready(()))
} else {
Ok(Async::NotReady)
}
}
fn call(&mut self, req: T) -> Self::Future {
RustlsAcceptorServiceFut {
_guard: self.conns.get(),
fut: self.acceptor.accept(req),
}
}
}
pub struct RustlsAcceptorServiceFut<T>
where
T: AsyncRead + AsyncWrite,
{
fut: Accept<T>,
_guard: CounterGuard,
}
impl<T: AsyncRead + AsyncWrite> Future for RustlsAcceptorServiceFut<T> {
type Item = TlsStream<T, ServerSession>;
type Error = io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
self.fut.poll()
}
}

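The rustls path has the same shape; building the `ServerConfig` itself is left abstract here because its construction API varied across rustls 0.x releases (`load_config()` is hypothetical):

fn rustls_acceptor(
    config: rustls::ServerConfig, // e.g. from a hypothetical load_config()
) -> actix_server::ssl::RustlsAcceptor<tokio_tcp::TcpStream> {
    // Arc-wrapping and conversion into tokio-rustls' TlsAcceptor happen inside
    actix_server::ssl::RustlsAcceptor::new(config)
}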
actix-server/src/worker.rs (new file)
@@ -0,0 +1,437 @@
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc;
use std::{mem, net, time};
use actix_rt::{spawn, Arbiter};
use futures::sync::mpsc::{UnboundedReceiver, UnboundedSender};
use futures::sync::oneshot;
use futures::{future, Async, Future, Poll, Stream};
use log::{error, info, trace};
use tokio_timer::{sleep, Delay};
use crate::accept::AcceptNotify;
use crate::counter::Counter;
use crate::services::{BoxedServerService, InternalServiceFactory, ServerMessage};
use crate::Token;
pub(crate) struct WorkerCommand(Conn);
/// Stop worker message. Returns `true` on successful shutdown
/// and `false` if some connections are still alive.
pub(crate) struct StopCommand {
graceful: bool,
result: oneshot::Sender<bool>,
}
#[derive(Debug)]
pub(crate) struct Conn {
pub io: net::TcpStream,
pub token: Token,
pub peer: Option<net::SocketAddr>,
}
static MAX_CONNS: AtomicUsize = AtomicUsize::new(25600);
/// Sets the maximum per-worker number of concurrent connections.
///
/// All socket listeners will stop accepting connections when this limit is
/// reached for each worker.
///
/// By default, the maximum is set to 25k connections per worker.
pub fn max_concurrent_connections(num: usize) {
MAX_CONNS.store(num, Ordering::Relaxed);
}
pub(crate) fn num_connections() -> usize {
MAX_CONNS_COUNTER.with(|conns| conns.total())
}
thread_local! {
static MAX_CONNS_COUNTER: Counter =
Counter::new(MAX_CONNS.load(Ordering::Relaxed));
}
#[derive(Clone)]
pub(crate) struct WorkerClient {
pub idx: usize,
tx1: UnboundedSender<WorkerCommand>,
tx2: UnboundedSender<StopCommand>,
avail: WorkerAvailability,
}
impl WorkerClient {
pub fn new(
idx: usize,
tx1: UnboundedSender<WorkerCommand>,
tx2: UnboundedSender<StopCommand>,
avail: WorkerAvailability,
) -> Self {
WorkerClient {
idx,
tx1,
tx2,
avail,
}
}
pub fn send(&self, msg: Conn) -> Result<(), Conn> {
self.tx1
.unbounded_send(WorkerCommand(msg))
.map_err(|msg| msg.into_inner().0)
}
pub fn available(&self) -> bool {
self.avail.available()
}
pub fn stop(&self, graceful: bool) -> oneshot::Receiver<bool> {
let (result, rx) = oneshot::channel();
let _ = self.tx2.unbounded_send(StopCommand { graceful, result });
rx
}
}
#[derive(Clone)]
pub(crate) struct WorkerAvailability {
notify: AcceptNotify,
available: Arc<AtomicBool>,
}
impl WorkerAvailability {
pub fn new(notify: AcceptNotify) -> Self {
WorkerAvailability {
notify,
available: Arc::new(AtomicBool::new(false)),
}
}
pub fn available(&self) -> bool {
self.available.load(Ordering::Acquire)
}
pub fn set(&self, val: bool) {
let old = self.available.swap(val, Ordering::Release);
if !old && val {
self.notify.notify()
}
}
}
/// Service worker
///
/// A worker accepts socket objects via an unbounded channel and starts
/// stream processing.
pub(crate) struct Worker {
rx: UnboundedReceiver<WorkerCommand>,
rx2: UnboundedReceiver<StopCommand>,
services: Vec<Option<(usize, BoxedServerService)>>,
availability: WorkerAvailability,
conns: Counter,
factories: Vec<Box<InternalServiceFactory>>,
state: WorkerState,
shutdown_timeout: time::Duration,
}
impl Worker {
pub(crate) fn start(
rx: UnboundedReceiver<WorkerCommand>,
rx2: UnboundedReceiver<StopCommand>,
factories: Vec<Box<InternalServiceFactory>>,
availability: WorkerAvailability,
shutdown_timeout: time::Duration,
) {
availability.set(false);
let mut wrk = MAX_CONNS_COUNTER.with(|conns| Worker {
rx,
rx2,
availability,
factories,
shutdown_timeout,
services: Vec::new(),
conns: conns.clone(),
state: WorkerState::Unavailable(Vec::new()),
});
let mut fut = Vec::new();
for (idx, factory) in wrk.factories.iter().enumerate() {
fut.push(factory.create().map(move |res| {
res.into_iter()
.map(|(t, s)| (idx, t, s))
.collect::<Vec<_>>()
}));
}
spawn(
future::join_all(fut)
.map_err(|e| {
error!("Can not start worker: {:?}", e);
Arbiter::current().stop();
})
.and_then(move |services| {
for item in services {
for (idx, token, service) in item {
while token.0 >= wrk.services.len() {
wrk.services.push(None);
}
wrk.services[token.0] = Some((idx, service));
}
}
wrk
}),
);
}
fn shutdown(&mut self, force: bool) {
if force {
self.services.iter_mut().for_each(|h| {
if let Some(h) = h {
let _ = h.1.call((None, ServerMessage::ForceShutdown));
}
});
} else {
let timeout = self.shutdown_timeout;
self.services.iter_mut().for_each(move |h| {
if let Some(h) = h {
let _ = h.1.call((None, ServerMessage::Shutdown(timeout)));
}
});
}
}
fn check_readiness(&mut self, trace: bool) -> Result<bool, (Token, usize)> {
let mut ready = self.conns.available();
let mut failed = None;
for (token, service) in &mut self.services.iter_mut().enumerate() {
if let Some(service) = service {
match service.1.poll_ready() {
Ok(Async::Ready(_)) => {
if trace {
trace!(
"Service {:?} is available",
self.factories[service.0].name(Token(token))
);
}
}
Ok(Async::NotReady) => ready = false,
Err(_) => {
error!(
"Service {:?} readiness check returned error, restarting",
self.factories[service.0].name(Token(token))
);
failed = Some((Token(token), service.0));
}
}
}
}
if let Some(idx) = failed {
Err(idx)
} else {
Ok(ready)
}
}
}
enum WorkerState {
None,
Available,
Unavailable(Vec<Conn>),
Restarting(
usize,
Token,
Box<Future<Item = Vec<(Token, BoxedServerService)>, Error = ()>>,
),
Shutdown(Delay, Delay, oneshot::Sender<bool>),
}
impl Future for Worker {
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
// `StopCommand` message handler
if let Ok(Async::Ready(Some(StopCommand { graceful, result }))) = self.rx2.poll() {
self.availability.set(false);
let num = num_connections();
if num == 0 {
info!("Shutting down worker, 0 connections");
let _ = result.send(true);
return Ok(Async::Ready(()));
} else if graceful {
self.shutdown(false);
let num = num_connections();
if num != 0 {
info!("Graceful worker shutdown, {} connections", num);
self.state = WorkerState::Shutdown(
sleep(time::Duration::from_secs(1)),
sleep(self.shutdown_timeout),
result,
);
} else {
let _ = result.send(true);
return Ok(Async::Ready(()));
}
} else {
info!("Force shutdown worker, {} connections", num);
self.shutdown(true);
let _ = result.send(false);
return Ok(Async::Ready(()));
}
}
let state = mem::replace(&mut self.state, WorkerState::None);
match state {
WorkerState::Unavailable(mut conns) => {
match self.check_readiness(true) {
Ok(true) => {
self.state = WorkerState::Available;
// process requests from wait queue
while let Some(msg) = conns.pop() {
match self.check_readiness(false) {
Ok(true) => {
let guard = self.conns.get();
let _ = self.services[msg.token.0]
.as_mut()
.expect("actix net bug")
.1
.call((Some(guard), ServerMessage::Connect(msg.io)));
}
Ok(false) => {
trace!("Worker is unavailable");
self.state = WorkerState::Unavailable(conns);
return self.poll();
}
Err((token, idx)) => {
trace!(
"Service {:?} failed, restarting",
self.factories[idx].name(token)
);
self.state = WorkerState::Restarting(
idx,
token,
self.factories[idx].create(),
);
return self.poll();
}
}
}
self.availability.set(true);
return self.poll();
}
Ok(false) => {
self.state = WorkerState::Unavailable(conns);
return Ok(Async::NotReady);
}
Err((token, idx)) => {
trace!(
"Service {:?} failed, restarting",
self.factories[idx].name(token)
);
self.state =
WorkerState::Restarting(idx, token, self.factories[idx].create());
return self.poll();
}
}
}
WorkerState::Restarting(idx, token, mut fut) => {
match fut.poll() {
Ok(Async::Ready(item)) => {
for (token, service) in item {
trace!(
"Service {:?} has been restarted",
self.factories[idx].name(token)
);
self.services[token.0] = Some((idx, service));
self.state = WorkerState::Unavailable(Vec::new());
}
}
Ok(Async::NotReady) => {
self.state = WorkerState::Restarting(idx, token, fut);
return Ok(Async::NotReady);
}
Err(_) => {
panic!(
"Can not restart {:?} service",
self.factories[idx].name(token)
);
}
}
return self.poll();
}
WorkerState::Shutdown(mut t1, mut t2, tx) => {
let num = num_connections();
if num == 0 {
let _ = tx.send(true);
Arbiter::current().stop();
return Ok(Async::Ready(()));
}
// check graceful timeout
match t2.poll().unwrap() {
Async::NotReady => (),
Async::Ready(_) => {
self.shutdown(true);
let _ = tx.send(false);
Arbiter::current().stop();
return Ok(Async::Ready(()));
}
}
// sleep for 1 second and then check again
match t1.poll().unwrap() {
Async::NotReady => (),
Async::Ready(_) => {
t1 = sleep(time::Duration::from_secs(1));
let _ = t1.poll();
}
}
self.state = WorkerState::Shutdown(t1, t2, tx);
return Ok(Async::NotReady);
}
WorkerState::Available => {
loop {
match self.rx.poll() {
// handle incoming tcp stream
Ok(Async::Ready(Some(WorkerCommand(msg)))) => {
match self.check_readiness(false) {
Ok(true) => {
let guard = self.conns.get();
let _ = self.services[msg.token.0]
.as_mut()
.expect("actix net bug")
.1
.call((Some(guard), ServerMessage::Connect(msg.io)));
continue;
}
Ok(false) => {
trace!("Worker is unavailable");
self.availability.set(false);
self.state = WorkerState::Unavailable(vec![msg]);
}
Err((token, idx)) => {
trace!(
"Service {:?} failed, restarting",
self.factories[idx].name(token)
);
self.availability.set(false);
self.state = WorkerState::Restarting(
idx,
token,
self.factories[idx].create(),
);
}
}
return self.poll();
}
Ok(Async::NotReady) => {
self.state = WorkerState::Available;
return Ok(Async::NotReady);
}
Ok(Async::Ready(None)) | Err(_) => return Ok(Async::Ready(())),
}
}
}
WorkerState::None => panic!(),
};
}
}
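Tying the worker knobs back to the builder: `maxconn` feeds `worker::max_concurrent_connections`, and `shutdown_timeout` bounds the graceful-stop window handled in `Worker::poll` above. A closing sketch, where `app_factory` is a hypothetical `StreamServiceFactory`:

fn run(app_factory: impl actix_server::StreamServiceFactory) {
    actix_server::Server::build()
        .maxconn(10_000) // per-worker connection cap (default 25_600)
        .shutdown_timeout(60) // seconds before live workers are force-dropped
        .bind("app", "127.0.0.1:8080", app_factory)
        .expect("can not bind")
        .run();
}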