1
0
mirror of https://github.com/fafhrd91/actix-net synced 2024-11-30 19:54:36 +01:00
actix-net/actix-server/src/builder.rs

503 lines
16 KiB
Rust
Raw Normal View History

use std::{
future::Future,
io, mem,
pin::Pin,
task::{Context, Poll},
time::Duration,
};
use actix_rt::{self as rt, net::TcpStream, time::sleep, System};
2018-12-06 23:04:42 +01:00
use log::{error, info};
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver};
use tokio::sync::oneshot;
2018-08-19 19:47:04 +02:00
use crate::accept::AcceptLoop;
use crate::config::{ConfiguredService, ServiceConfig};
2018-12-11 06:06:54 +01:00
use crate::server::{Server, ServerCommand};
Migrate actix-net to std::future (#64) * Migrate actix-codec, actix-rt, and actix-threadpool to std::future * update to latest tokio alpha and futures-rs * Migrate actix-service to std::future, This is a squash of ~8 commits, since it included a lot of experimentation. To see the commits, look into the semtexzv/std-future-service-tmp branch. * update futures-rs and tokio * Migrate actix-threadpool to std::future (#59) * Migrate actix-threadpool to std::future * Cosmetic refactor - turn log::error! into log::warn! as it doesn't throw any error - add Clone and Copy impls for Cancelled making it cheap to operate with - apply rustfmt * Bump up crate version to 0.2.0 and pre-fill its changelog * Disable patching 'actix-threadpool' crate in global workspace as unnecessary * Revert patching and fix 'actix-rt' * Migrate actix-rt to std::future (#47) * remove Pin from Service::poll_ready(); simplify combinators api; make code compile * disable tests * update travis config * refactor naming * drop IntoFuture trait * Migrate actix-server to std::future (#50) Still not finished, this is more WIP, this is an aggregation of several commits, which can be found in semtexzv/std-future-server-tmp branch * update actix-server * rename Factor to ServiceFactory * start server worker in start mehtod * update actix-utils * remove IntoTransform trait * Migrate actix-server::ssl::nativetls to std futures (#61) * Refactor 'nativetls' module * Migrate 'actix-server-config' to std futures - remove "uds" feature - disable features by default * Switch NativeTlsAcceptor to use 'tokio-tls' crate * Bikeshed features names and remove unnecessary dependencies for 'actix-server-config' crate * update openssl impl * migrate actix-connect to std::future * migrate actix-ioframe to std::future * update version to alpha.1 * fix boxed service * migrate server rustls support * migratte openssl and rustls connecttors * store the thread's handle with arbiter (#62) * update ssl connect tests * restore service 
tests * update readme
2019-11-14 13:38:24 +01:00
use crate::service::{InternalServiceFactory, ServiceFactory, StreamNewService};
use crate::signals::{Signal, Signals};
use crate::socket::{MioListener, StdSocketAddr, StdTcpListener, ToSocketAddrs};
use crate::socket::{MioTcpListener, MioTcpSocket};
use crate::waker_queue::{WakerInterest, WakerQueue};
2021-02-04 16:01:51 +01:00
use crate::worker::{self, ServerWorker, ServerWorkerConfig, WorkerAvailability, WorkerHandle};
use crate::{join_all, Token};
2018-12-10 05:30:04 +01:00
2018-12-10 06:51:35 +01:00
/// Server builder
pub struct ServerBuilder {
    // Number of worker threads to spawn; defaults to the logical CPU count.
    threads: usize,
    // Monotonic token generator; each bound service/socket gets the next token.
    token: Token,
    // Listen backlog passed to every TCP listener created via `bind`.
    backlog: u32,
    // Live worker handles, paired with the worker's index.
    handles: Vec<(usize, WorkerHandle)>,
    // Factories for the services that run on accepted connections.
    services: Vec<Box<dyn InternalServiceFactory>>,
    // Bound listeners as (token, service name, listener) triples; drained
    // into the accept loop by `run()`.
    sockets: Vec<(Token, String, MioListener)>,
    // Accept loop that distributes incoming connections across workers.
    accept: AcceptLoop,
    // When true, stop the actix System after the server stops.
    exit: bool,
    // When true, skip installing OS signal handlers in `run()`.
    no_signals: bool,
    // Receiving end of the command channel fed by the `Server` handle.
    cmd: UnboundedReceiver<ServerCommand>,
    // Controller handle cloned and returned to the caller from `run()`.
    server: Server,
    // Channels to notify once the server has fully stopped.
    notify: Vec<oneshot::Sender<()>>,
    // Per-worker settings (shutdown timeout, max blocking threads, ...).
    worker_config: ServerWorkerConfig,
}
impl Default for ServerBuilder {
fn default() -> Self {
Self::new()
}
2018-08-19 19:47:04 +02:00
}
2018-12-10 05:30:04 +01:00
impl ServerBuilder {
    /// Create new Server builder instance
    pub fn new() -> ServerBuilder {
        // Command channel: the `Server` handle keeps the sender and this
        // builder (later polled as a future) drains the receiver in
        // `handle_cmd`.
        let (tx, rx) = unbounded_channel();
        let server = Server::new(tx);

        ServerBuilder {
            threads: num_cpus::get(),
            token: Token::default(),
            handles: Vec::new(),
            services: Vec::new(),
            sockets: Vec::new(),
            accept: AcceptLoop::new(server.clone()),
            backlog: 2048,
            exit: false,
            no_signals: false,
            cmd: rx,
            notify: Vec::new(),
            server,
            worker_config: ServerWorkerConfig::default(),
        }
    }

    /// Set number of workers to start.
    ///
    /// By default server uses number of available logical cpu as workers
    /// count. Workers must be greater than 0.
    ///
    /// # Panics
    /// Panics if `num` is 0.
    pub fn workers(mut self, num: usize) -> Self {
        assert_ne!(num, 0, "workers must be greater than 0");
        self.threads = num;
        self
    }

    /// Set max number of threads for each worker's blocking task thread pool.
    ///
    /// One thread pool is set up **per worker**; not shared across workers.
    ///
    /// # Examples:
    /// ```
    /// # use actix_server::ServerBuilder;
    /// let builder = ServerBuilder::new()
    /// .workers(4) // server has 4 worker thread.
    /// .worker_max_blocking_threads(4); // every worker has 4 max blocking threads.
    /// ```
    ///
    /// See [tokio::runtime::Builder::max_blocking_threads] for behavior reference.
    pub fn worker_max_blocking_threads(mut self, num: usize) -> Self {
        self.worker_config.max_blocking_threads(num);
        self
    }

    /// Set the maximum number of pending connections.
    ///
    /// This refers to the number of clients that can be waiting to be served.
    /// Exceeding this number results in the client getting an error when
    /// attempting to connect. It should only affect servers under significant
    /// load.
    ///
    /// Generally set in the 64-2048 range. Default value is 2048.
    ///
    /// This method should be called before `bind()` method call.
    pub fn backlog(mut self, num: u32) -> Self {
        self.backlog = num;
        self
    }

    /// Sets the maximum per-worker number of concurrent connections.
    ///
    /// All socket listeners will stop accepting connections when this limit is
    /// reached for each worker.
    ///
    /// By default max connections is set to a 25k per worker.
    pub fn maxconn(self, num: usize) -> Self {
        // NOTE: this sets process-global state in the worker module, not a
        // field on this builder — it affects all servers in the process.
        worker::max_concurrent_connections(num);
        self
    }

    /// Stop Actix system.
    pub fn system_exit(mut self) -> Self {
        self.exit = true;
        self
    }

    /// Disable signal handling.
    pub fn disable_signals(mut self) -> Self {
        self.no_signals = true;
        self
    }

    /// Timeout for graceful workers shutdown in seconds.
    ///
    /// After receiving a stop signal, workers have this much time to finish serving requests.
    /// Workers still alive after the timeout are force dropped.
    ///
    /// By default shutdown timeout sets to 30 seconds.
    pub fn shutdown_timeout(mut self, sec: u64) -> Self {
        self.worker_config
            .shutdown_timeout(Duration::from_secs(sec));
        self
    }

    /// Execute external configuration as part of the server building process.
    ///
    /// This function is useful for moving parts of configuration to a different module or
    /// even library.
    ///
    /// # Errors
    /// Returns any `io::Error` produced by the configuration closure or while
    /// resolving a configured listener's local address.
    pub fn configure<F>(mut self, f: F) -> io::Result<ServerBuilder>
    where
        F: Fn(&mut ServiceConfig) -> io::Result<()>,
    {
        let mut cfg = ServiceConfig::new(self.threads, self.backlog);

        f(&mut cfg)?;

        // Register every service the closure added on the config with a fresh
        // token, and keep the bound listener for the accept loop.
        if let Some(apply) = cfg.apply {
            let mut srv = ConfiguredService::new(apply);
            for (name, lst) in cfg.services {
                let token = self.token.next();
                srv.stream(token, name.clone(), lst.local_addr()?);
                self.sockets.push((token, name, MioListener::Tcp(lst)));
            }
            self.services.push(Box::new(srv));
        }
        // The closure may have overridden the worker count.
        self.threads = cfg.threads;

        Ok(self)
    }

    /// Add new service to the server.
    ///
    /// Binds every address `addr` resolves to; one service entry and one
    /// listener are registered per successfully bound address.
    pub fn bind<F, U, N: AsRef<str>>(mut self, name: N, addr: U, factory: F) -> io::Result<Self>
    where
        F: ServiceFactory<TcpStream>,
        U: ToSocketAddrs,
    {
        let sockets = bind_addr(addr, self.backlog)?;

        for lst in sockets {
            let token = self.token.next();
            self.services.push(StreamNewService::create(
                name.as_ref().to_string(),
                token,
                factory.clone(),
                lst.local_addr()?,
            ));
            self.sockets
                .push((token, name.as_ref().to_string(), MioListener::Tcp(lst)));
        }
        Ok(self)
    }

    /// Add new unix domain service to the server.
    #[cfg(unix)]
    pub fn bind_uds<F, U, N>(self, name: N, addr: U, factory: F) -> io::Result<Self>
    where
        F: ServiceFactory<actix_rt::net::UnixStream>,
        N: AsRef<str>,
        U: AsRef<std::path::Path>,
    {
        // The path must not exist when we try to bind.
        // Try to remove it to avoid bind error.
        if let Err(e) = std::fs::remove_file(addr.as_ref()) {
            // NotFound is expected and not an issue. Anything else is.
            if e.kind() != std::io::ErrorKind::NotFound {
                return Err(e);
            }
        }

        let lst = crate::socket::StdUnixListener::bind(addr)?;
        self.listen_uds(name, lst, factory)
    }

    /// Add new unix domain service to the server.
    /// Useful when running as a systemd service and
    /// a socket FD can be acquired using the systemd crate.
    #[cfg(unix)]
    pub fn listen_uds<F, N: AsRef<str>>(
        mut self,
        name: N,
        lst: crate::socket::StdUnixListener,
        factory: F,
    ) -> io::Result<Self>
    where
        F: ServiceFactory<actix_rt::net::UnixStream>,
    {
        use std::net::{IpAddr, Ipv4Addr};
        lst.set_nonblocking(true)?;
        let token = self.token.next();
        // Placeholder socket address: a UDS listener has no TCP address, but
        // the internal service API requires one. NOTE(review): 127.0.0.1:8080
        // appears to be an arbitrary dummy value — confirm nothing reads it
        // as a real address.
        let addr = StdSocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
        self.services.push(StreamNewService::create(
            name.as_ref().to_string(),
            token,
            factory,
            addr,
        ));
        self.sockets
            .push((token, name.as_ref().to_string(), MioListener::from(lst)));
        Ok(self)
    }

    /// Add new service to the server.
    ///
    /// Takes ownership of an already-bound standard TCP listener; the
    /// listener is switched to non-blocking mode for use with the poll-based
    /// accept loop.
    pub fn listen<F, N: AsRef<str>>(
        mut self,
        name: N,
        lst: StdTcpListener,
        factory: F,
    ) -> io::Result<Self>
    where
        F: ServiceFactory<TcpStream>,
    {
        lst.set_nonblocking(true)?;
        let addr = lst.local_addr()?;

        let token = self.token.next();
        self.services.push(StreamNewService::create(
            name.as_ref().to_string(),
            token,
            factory,
            addr,
        ));
        self.sockets
            .push((token, name.as_ref().to_string(), MioListener::from(lst)));

        Ok(self)
    }

    /// Starts processing incoming connections and return server controller.
    ///
    /// Spawns the workers and the accept loop, optionally installs signal
    /// handling, then spawns `self` as a future that services commands sent
    /// through the returned [`Server`] handle.
    ///
    /// # Panics
    /// Panics if no socket was bound (`bind`/`listen` never called).
    pub fn run(mut self) -> Server {
        if self.sockets.is_empty() {
            panic!("Server should have at least one bound socket");
        } else {
            info!("Starting {} workers", self.threads);

            // start workers
            let handles = (0..self.threads)
                .map(|idx| {
                    let handle = self.start_worker(idx, self.accept.waker_owned());
                    self.handles.push((idx, handle.clone()));

                    handle
                })
                .collect();

            // start accept thread
            for sock in &self.sockets {
                info!("Starting \"{}\" service on {}", sock.1, sock.2);
            }
            // Hand the (token, listener) pairs to the accept loop; the
            // service-name strings are only needed for the log lines above.
            self.accept.start(
                mem::take(&mut self.sockets)
                    .into_iter()
                    .map(|t| (t.0, t.2))
                    .collect(),
                handles,
            );

            // handle signals
            if !self.no_signals {
                Signals::start(self.server.clone());
            }

            // start http server actor
            let server = self.server.clone();
            rt::spawn(self);
            server
        }
    }

    // Spawn one worker with a clone of every registered service factory and
    // an availability flag tied to the accept loop's waker.
    fn start_worker(&self, idx: usize, waker: WakerQueue) -> WorkerHandle {
        let avail = WorkerAvailability::new(waker);
        let services = self.services.iter().map(|v| v.clone_factory()).collect();

        ServerWorker::start(idx, services, avail, self.worker_config)
    }

    // Dispatch a single command received over the command channel.
    fn handle_cmd(&mut self, item: ServerCommand) {
        match item {
            ServerCommand::Pause(tx) => {
                self.accept.wake(WakerInterest::Pause);
                // Receiver may have been dropped; pause succeeded regardless.
                let _ = tx.send(());
            }
            ServerCommand::Resume(tx) => {
                self.accept.wake(WakerInterest::Resume);
                let _ = tx.send(());
            }
            ServerCommand::Signal(sig) => {
                // Signals support
                // Handle `SIGINT`, `SIGTERM`, `SIGQUIT` signals and stop actix system
                match sig {
                    // SIGINT / SIGQUIT: non-graceful stop; SIGTERM: graceful.
                    Signal::Int => {
                        info!("SIGINT received, exiting");
                        self.exit = true;
                        self.handle_cmd(ServerCommand::Stop {
                            graceful: false,
                            completion: None,
                        })
                    }
                    Signal::Term => {
                        info!("SIGTERM received, stopping");
                        self.exit = true;
                        self.handle_cmd(ServerCommand::Stop {
                            graceful: true,
                            completion: None,
                        })
                    }
                    Signal::Quit => {
                        info!("SIGQUIT received, exiting");
                        self.exit = true;
                        self.handle_cmd(ServerCommand::Stop {
                            graceful: false,
                            completion: None,
                        })
                    }
                    _ => (),
                }
            }
            ServerCommand::Notify(tx) => {
                // Stored until a Stop command fires them.
                self.notify.push(tx);
            }
            ServerCommand::Stop {
                graceful,
                completion,
            } => {
                let exit = self.exit;

                // stop accept thread
                self.accept.wake(WakerInterest::Stop);
                let notify = std::mem::take(&mut self.notify);

                // stop workers
                if !self.handles.is_empty() && graceful {
                    // Graceful path: wait for every worker's stop future
                    // before signalling completion/notify listeners.
                    let iter = self
                        .handles
                        .iter()
                        .map(move |worker| worker.1.stop(graceful))
                        .collect();

                    let fut = join_all(iter);

                    rt::spawn(async move {
                        let _ = fut.await;
                        if let Some(tx) = completion {
                            let _ = tx.send(());
                        }
                        for tx in notify {
                            let _ = tx.send(());
                        }
                        if exit {
                            // NOTE(review): the 300 ms delay before stopping
                            // the System presumably lets in-flight messages
                            // drain — confirm the constant's rationale.
                            rt::spawn(async {
                                sleep(Duration::from_millis(300)).await;
                                System::current().stop();
                            });
                        }
                    });
                } else {
                    // we need to stop system if server was spawned
                    if self.exit {
                        rt::spawn(async {
                            sleep(Duration::from_millis(300)).await;
                            System::current().stop();
                        });
                    }
                    if let Some(tx) = completion {
                        let _ = tx.send(());
                    }
                    for tx in notify {
                        let _ = tx.send(());
                    }
                }
            }
            ServerCommand::WorkerFaulted(idx) => {
                // Remove the dead worker's handle; order of `handles` is not
                // significant, so swap_remove is safe here.
                let mut found = false;
                for i in 0..self.handles.len() {
                    if self.handles[i].0 == idx {
                        self.handles.swap_remove(i);
                        found = true;
                        break;
                    }
                }

                if found {
                    error!("Worker has died {:?}, restarting", idx);

                    // Find the smallest index not used by any live worker.
                    let mut new_idx = self.handles.len();
                    'found: loop {
                        for i in 0..self.handles.len() {
                            if self.handles[i].0 == new_idx {
                                new_idx += 1;
                                continue 'found;
                            }
                        }
                        break;
                    }

                    // Start a replacement worker and tell the accept loop
                    // about the new handle.
                    let handle = self.start_worker(new_idx, self.accept.waker_owned());
                    self.handles.push((new_idx, handle.clone()));
                    self.accept.wake(WakerInterest::Worker(handle));
                }
            }
        }
    }
}
2018-12-10 06:51:35 +01:00
2018-12-11 06:06:54 +01:00
impl Future for ServerBuilder {
    type Output = ();

    // Drive the command loop: drain every command currently queued on the
    // channel, handling each via `handle_cmd`.
    //
    // This future never completes. `poll_recv` registers the waker when the
    // channel is empty, so the task is re-polled on the next command.
    // NOTE(review): `Poll::Ready(None)` (all senders dropped) also falls into
    // the `_ => Pending` arm, after which nothing will wake this task again —
    // confirm that parking the builder forever in that case is intended.
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        loop {
            match Pin::new(&mut self.cmd).poll_recv(cx) {
                Poll::Ready(Some(it)) => self.as_mut().get_mut().handle_cmd(it),
                _ => return Poll::Pending,
            }
        }
    }
}
pub(super) fn bind_addr<S: ToSocketAddrs>(
2019-03-11 20:01:55 +01:00
addr: S,
backlog: u32,
) -> io::Result<Vec<MioTcpListener>> {
2018-08-19 19:47:04 +02:00
let mut err = None;
let mut succ = false;
let mut sockets = Vec::new();
for addr in addr.to_socket_addrs()? {
2019-03-11 20:01:55 +01:00
match create_tcp_listener(addr, backlog) {
2018-08-19 19:47:04 +02:00
Ok(lst) => {
succ = true;
sockets.push(lst);
}
Err(e) => err = Some(e),
}
}
if !succ {
if let Some(e) = err.take() {
Err(e)
} else {
Err(io::Error::new(
io::ErrorKind::Other,
"Can not bind to address.",
))
}
} else {
Ok(sockets)
}
}
/// Bind a non-exclusive (SO_REUSEADDR) TCP listener on `addr` with the given
/// accept backlog, choosing the socket family from the address version.
fn create_tcp_listener(addr: StdSocketAddr, backlog: u32) -> io::Result<MioTcpListener> {
    let socket = if addr.is_ipv4() {
        MioTcpSocket::new_v4()?
    } else {
        MioTcpSocket::new_v6()?
    };

    socket.set_reuseaddr(true)?;
    socket.bind(addr)?;
    socket.listen(backlog)
}