
move server protocol impl to submodule

Nikolay Kim
2018-01-11 18:35:05 -08:00
parent fa93701bee
commit 8a058efb4e
15 changed files with 269 additions and 236 deletions

344
src/server/channel.rs Normal file

@@ -0,0 +1,344 @@
use std::{ptr, mem, time, io};
use std::rc::Rc;
use std::net::{SocketAddr, Shutdown};
use bytes::{Bytes, Buf, BufMut};
use futures::{Future, Poll, Async};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_core::net::TcpStream;
use super::{h1, h2, HttpHandler, IoStream};
use super::settings::WorkerSettings;
enum HttpProtocol<T: IoStream, H: 'static> {
H1(h1::Http1<T, H>),
H2(h2::Http2<T, H>),
}
#[doc(hidden)]
pub struct HttpChannel<T, H>
where T: IoStream, H: HttpHandler + 'static
{
proto: Option<HttpProtocol<T, H>>,
node: Option<Node<HttpChannel<T, H>>>,
}
impl<T, H> HttpChannel<T, H>
where T: IoStream, H: HttpHandler + 'static
{
pub(crate) fn new(h: Rc<WorkerSettings<H>>,
io: T, peer: Option<SocketAddr>, http2: bool) -> HttpChannel<T, H>
{
h.add_channel();
if http2 {
HttpChannel {
node: None,
proto: Some(HttpProtocol::H2(
h2::Http2::new(h, io, peer, Bytes::new()))) }
} else {
HttpChannel {
node: None,
proto: Some(HttpProtocol::H1(
h1::Http1::new(h, io, peer))) }
}
}
fn shutdown(&mut self) {
match self.proto {
Some(HttpProtocol::H1(ref mut h1)) => {
let io = h1.io();
let _ = IoStream::set_linger(io, Some(time::Duration::new(0, 0)));
let _ = IoStream::shutdown(io, Shutdown::Both);
}
Some(HttpProtocol::H2(ref mut h2)) => {
h2.shutdown()
}
_ => unreachable!(),
}
}
}
/*impl<T, H> Drop for HttpChannel<T, H>
where T: AsyncRead + AsyncWrite + 'static, H: HttpHandler + 'static
{
fn drop(&mut self) {
println!("Drop http channel");
}
}*/
impl<T, H> Future for HttpChannel<T, H>
where T: IoStream, H: HttpHandler + 'static
{
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if self.node.is_none() {
self.node = Some(Node::new(self));
match self.proto {
Some(HttpProtocol::H1(ref mut h1)) => {
h1.settings().head().insert(self.node.as_ref().unwrap());
}
Some(HttpProtocol::H2(ref mut h2)) => {
h2.settings().head().insert(self.node.as_ref().unwrap());
}
_ => unreachable!(),
}
}
match self.proto {
Some(HttpProtocol::H1(ref mut h1)) => {
match h1.poll() {
Ok(Async::Ready(h1::Http1Result::Done)) => {
h1.settings().remove_channel();
self.node.as_ref().unwrap().remove();
return Ok(Async::Ready(()))
}
Ok(Async::Ready(h1::Http1Result::Switch)) => (),
Ok(Async::NotReady) =>
return Ok(Async::NotReady),
Err(_) => {
h1.settings().remove_channel();
self.node.as_ref().unwrap().remove();
return Err(())
}
}
}
Some(HttpProtocol::H2(ref mut h2)) => {
let result = h2.poll();
match result {
Ok(Async::Ready(())) | Err(_) => {
h2.settings().remove_channel();
self.node.as_ref().unwrap().remove();
}
_ => (),
}
return result
}
None => unreachable!(),
}
// upgrade to h2
let proto = self.proto.take().unwrap();
match proto {
HttpProtocol::H1(h1) => {
let (h, io, addr, buf) = h1.into_inner();
self.proto = Some(
HttpProtocol::H2(h2::Http2::new(h, io, addr, buf)));
self.poll()
}
_ => unreachable!()
}
}
}
pub(crate) struct Node<T>
{
next: Option<*mut Node<()>>,
prev: Option<*mut Node<()>>,
element: *mut T,
}
impl<T> Node<T>
{
fn new(el: &mut T) -> Self {
Node {
next: None,
prev: None,
element: el as *mut _,
}
}
fn insert<I>(&self, next: &Node<I>) {
#[allow(mutable_transmutes)]
unsafe {
if let Some(ref next2) = self.next {
let n: &mut Node<()> = mem::transmute(next2.as_ref().unwrap());
n.prev = Some(next as *const _ as *mut _);
}
let slf: &mut Node<T> = mem::transmute(self);
slf.next = Some(next as *const _ as *mut _);
let next: &mut Node<T> = mem::transmute(next);
next.prev = Some(slf as *const _ as *mut _);
}
}
fn remove(&self) {
#[allow(mutable_transmutes)]
unsafe {
if let Some(ref prev) = self.prev {
let p: &mut Node<()> = mem::transmute(prev.as_ref().unwrap());
let slf: &mut Node<T> = mem::transmute(self);
p.next = slf.next.take();
}
}
}
}
impl Node<()> {
pub(crate) fn head() -> Self {
Node {
next: None,
prev: None,
element: ptr::null_mut(),
}
}
pub(crate) fn traverse<T, H>(&self) where T: IoStream, H: HttpHandler + 'static {
let mut next = self.next.as_ref();
loop {
if let Some(n) = next {
unsafe {
let n: &Node<()> = mem::transmute(n.as_ref().unwrap());
next = n.next.as_ref();
if !n.element.is_null() {
let ch: &mut HttpChannel<T, H> = mem::transmute(
&mut *(n.element as *mut _));
ch.shutdown();
}
}
} else {
return
}
}
}
}
impl IoStream for TcpStream {
#[inline]
fn shutdown(&mut self, how: Shutdown) -> io::Result<()> {
TcpStream::shutdown(self, how)
}
#[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
TcpStream::set_nodelay(self, nodelay)
}
#[inline]
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
TcpStream::set_linger(self, dur)
}
}
/// Wrapper for `AsyncRead + AsyncWrite` types
pub(crate) struct WrapperStream<T> where T: AsyncRead + AsyncWrite + 'static {
io: T,
}
impl<T> WrapperStream<T> where T: AsyncRead + AsyncWrite + 'static
{
pub fn new(io: T) -> Self {
WrapperStream{io: io}
}
}
impl<T> IoStream for WrapperStream<T>
where T: AsyncRead + AsyncWrite + 'static
{
#[inline]
fn shutdown(&mut self, _: Shutdown) -> io::Result<()> {
Ok(())
}
#[inline]
fn set_nodelay(&mut self, _: bool) -> io::Result<()> {
Ok(())
}
#[inline]
fn set_linger(&mut self, _: Option<time::Duration>) -> io::Result<()> {
Ok(())
}
}
impl<T> io::Read for WrapperStream<T>
where T: AsyncRead + AsyncWrite + 'static
{
#[inline]
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.io.read(buf)
}
}
impl<T> io::Write for WrapperStream<T>
where T: AsyncRead + AsyncWrite + 'static
{
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.io.write(buf)
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
self.io.flush()
}
}
impl<T> AsyncRead for WrapperStream<T>
where T: AsyncRead + AsyncWrite + 'static
{
fn read_buf<B: BufMut>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
self.io.read_buf(buf)
}
}
impl<T> AsyncWrite for WrapperStream<T>
where T: AsyncRead + AsyncWrite + 'static
{
fn shutdown(&mut self) -> Poll<(), io::Error> {
self.io.shutdown()
}
fn write_buf<B: Buf>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
self.io.write_buf(buf)
}
}
#[cfg(feature="alpn")]
use tokio_openssl::SslStream;
#[cfg(feature="alpn")]
impl IoStream for SslStream<TcpStream> {
#[inline]
fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> {
let _ = self.get_mut().shutdown();
Ok(())
}
#[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
self.get_mut().get_mut().set_nodelay(nodelay)
}
#[inline]
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_linger(dur)
}
}
#[cfg(feature="tls")]
use tokio_tls::TlsStream;
#[cfg(feature="tls")]
impl IoStream for TlsStream<TcpStream> {
#[inline]
fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> {
let _ = self.get_mut().shutdown();
Ok(())
}
#[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
self.get_mut().get_mut().set_nodelay(nodelay)
}
#[inline]
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_linger(dur)
}
}
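For orientation, HttpChannel is the per-connection future that the rest of this commit spawns onto the reactor. A minimal sketch of how it is driven, based on the Handler<io::Result<Conn<T>>> impl in src/server/srv.rs below (`settings`, `io` and `peer` are assumed to be in scope):
// Sketch: spawning a channel for one accepted connection.
// `settings: Rc<WorkerSettings<H>>`, `io: T where T: IoStream`,
// `peer: Option<SocketAddr>` are assumed to already exist.
let channel = HttpChannel::new(Rc::clone(&settings), io, peer, /* http2 */ false);
// HttpChannel implements Future<Item = (), Error = ()>, so it can be spawned as-is;
// it registers itself in the worker's Node list and unregisters when the connection ends.
Arbiter::handle().spawn(channel);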

1528
src/server/h1.rs Normal file

File diff suppressed because it is too large

237
src/server/h1writer.rs Normal file

@@ -0,0 +1,237 @@
use std::io;
use bytes::BufMut;
use futures::{Async, Poll};
use tokio_io::AsyncWrite;
use http::Version;
use http::header::{HeaderValue, CONNECTION, DATE};
use helpers;
use body::Body;
use helpers::SharedBytes;
use encoding::PayloadEncoder;
use httprequest::HttpMessage;
use httpresponse::HttpResponse;
use server::{Writer, WriterState, MAX_WRITE_BUFFER_SIZE};
const AVERAGE_HEADER_SIZE: usize = 30; // totally scientific
bitflags! {
struct Flags: u8 {
const STARTED = 0b0000_0001;
const UPGRADE = 0b0000_0010;
const KEEPALIVE = 0b0000_0100;
const DISCONNECTED = 0b0000_1000;
}
}
pub(crate) struct H1Writer<T: AsyncWrite> {
flags: Flags,
stream: T,
encoder: PayloadEncoder,
written: u64,
headers_size: u32,
buffer: SharedBytes,
}
impl<T: AsyncWrite> H1Writer<T> {
pub fn new(stream: T, buf: SharedBytes) -> H1Writer<T> {
H1Writer {
flags: Flags::empty(),
stream: stream,
encoder: PayloadEncoder::empty(buf.clone()),
written: 0,
headers_size: 0,
buffer: buf,
}
}
pub fn get_mut(&mut self) -> &mut T {
&mut self.stream
}
pub fn reset(&mut self) {
self.written = 0;
self.flags = Flags::empty();
}
pub fn into_inner(self) -> T {
self.stream
}
pub fn disconnected(&mut self) {
self.encoder.get_mut().take();
}
pub fn keepalive(&self) -> bool {
self.flags.contains(Flags::KEEPALIVE) && !self.flags.contains(Flags::UPGRADE)
}
fn write_to_stream(&mut self) -> Result<WriterState, io::Error> {
let buffer = self.encoder.get_mut();
while !buffer.is_empty() {
match self.stream.write(buffer.as_ref()) {
Ok(n) => {
let _ = buffer.split_to(n);
},
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
if buffer.len() > MAX_WRITE_BUFFER_SIZE {
return Ok(WriterState::Pause)
} else {
return Ok(WriterState::Done)
}
}
Err(err) => return Err(err),
}
}
Ok(WriterState::Done)
}
}
impl<T: AsyncWrite> Writer for H1Writer<T> {
#[inline]
fn written(&self) -> u64 {
self.written
}
#[inline]
fn flush(&mut self) -> Poll<(), io::Error> {
match self.stream.flush() {
Ok(_) => Ok(Async::Ready(())),
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
Ok(Async::NotReady)
} else {
Err(e)
}
}
}
}
fn start(&mut self, req: &mut HttpMessage, msg: &mut HttpResponse)
-> Result<WriterState, io::Error>
{
// prepare task
self.flags.insert(Flags::STARTED);
self.encoder = PayloadEncoder::new(self.buffer.clone(), req, msg);
if msg.keep_alive().unwrap_or_else(|| req.keep_alive()) {
self.flags.insert(Flags::KEEPALIVE);
}
// Connection upgrade
let version = msg.version().unwrap_or_else(|| req.version);
if msg.upgrade() {
msg.headers_mut().insert(CONNECTION, HeaderValue::from_static("upgrade"));
}
// keep-alive
else if self.flags.contains(Flags::KEEPALIVE) {
if version < Version::HTTP_11 {
msg.headers_mut().insert(CONNECTION, HeaderValue::from_static("keep-alive"));
}
} else if version >= Version::HTTP_11 {
msg.headers_mut().insert(CONNECTION, HeaderValue::from_static("close"));
}
let body = msg.replace_body(Body::Empty);
// render message
{
let mut buffer = self.encoder.get_mut();
if let Body::Binary(ref bytes) = body {
buffer.reserve(256 + msg.headers().len() * AVERAGE_HEADER_SIZE + bytes.len());
} else {
buffer.reserve(256 + msg.headers().len() * AVERAGE_HEADER_SIZE);
}
// status line
helpers::write_status_line(version, msg.status().as_u16(), &mut buffer);
buffer.extend_from_slice(msg.reason().as_bytes());
match body {
Body::Empty =>
buffer.extend_from_slice(b"\r\ncontent-length: 0\r\n"),
Body::Binary(ref bytes) =>
helpers::write_content_length(bytes.len(), &mut buffer),
_ =>
buffer.extend_from_slice(b"\r\n"),
}
// write headers
for (key, value) in msg.headers() {
let v = value.as_ref();
let k = key.as_str().as_bytes();
buffer.reserve(k.len() + v.len() + 4);
buffer.put_slice(k);
buffer.put_slice(b": ");
buffer.put_slice(v);
buffer.put_slice(b"\r\n");
}
// using helpers::date is quite a lot faster
if !msg.headers().contains_key(DATE) {
helpers::date(&mut buffer);
} else {
// terminate headers
buffer.extend_from_slice(b"\r\n");
}
self.headers_size = buffer.len() as u32;
}
if let Body::Binary(bytes) = body {
self.written = bytes.len() as u64;
self.encoder.write(bytes.as_ref())?;
} else {
msg.replace_body(body);
}
Ok(WriterState::Done)
}
fn write(&mut self, payload: &[u8]) -> Result<WriterState, io::Error> {
self.written += payload.len() as u64;
if !self.flags.contains(Flags::DISCONNECTED) {
if self.flags.contains(Flags::STARTED) {
// TODO: add warning, write after EOF
self.encoder.write(payload)?;
return Ok(WriterState::Done)
} else {
// might be a response to EXPECT (100-continue)
self.encoder.get_mut().extend_from_slice(payload)
}
}
if self.encoder.len() > MAX_WRITE_BUFFER_SIZE {
Ok(WriterState::Pause)
} else {
Ok(WriterState::Done)
}
}
fn write_eof(&mut self) -> Result<WriterState, io::Error> {
self.encoder.write_eof()?;
if !self.encoder.is_eof() {
Err(io::Error::new(io::ErrorKind::Other,
"Last payload item, but eof is not reached"))
} else if self.encoder.len() > MAX_WRITE_BUFFER_SIZE {
Ok(WriterState::Pause)
} else {
Ok(WriterState::Done)
}
}
#[inline]
fn poll_completed(&mut self, shutdown: bool) -> Poll<(), io::Error> {
match self.write_to_stream() {
Ok(WriterState::Done) => {
if shutdown {
self.stream.shutdown()
} else {
Ok(Async::Ready(()))
}
},
Ok(WriterState::Pause) => Ok(Async::NotReady),
Err(err) => Err(err)
}
}
}
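The WriterState values returned above implement simple backpressure: Pause once more than MAX_WRITE_BUFFER_SIZE (64k) bytes are buffered, Done otherwise. A hypothetical caller-side sketch (the real caller lives in src/server/h1.rs, whose diff is suppressed above):
// Hypothetical sketch: reacting to the writer's backpressure signal.
match writer.write(chunk)? {
    WriterState::Pause => {
        // More than MAX_WRITE_BUFFER_SIZE bytes buffered: stop feeding the
        // payload until poll_completed() has flushed the buffer to the socket.
    }
    WriterState::Done => {
        // Buffer is within limits; keep writing the next chunk.
    }
}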

365
src/server/h2.rs Normal file

@@ -0,0 +1,365 @@
use std::{io, cmp, mem};
use std::rc::Rc;
use std::io::{Read, Write};
use std::time::Duration;
use std::net::SocketAddr;
use std::collections::VecDeque;
use actix::Arbiter;
use http::request::Parts;
use http2::{Reason, RecvStream};
use http2::server::{self, Connection, Handshake, SendResponse};
use bytes::{Buf, Bytes};
use futures::{Async, Poll, Future, Stream};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_core::reactor::Timeout;
use pipeline::Pipeline;
use error::PayloadError;
use encoding::PayloadType;
use httpcodes::HTTPNotFound;
use httprequest::HttpRequest;
use payload::{Payload, PayloadWriter};
use super::h2writer::H2Writer;
use super::settings::WorkerSettings;
use super::{HttpHandler, HttpHandlerTask};
bitflags! {
struct Flags: u8 {
const DISCONNECTED = 0b0000_0010;
}
}
/// HTTP/2 Transport
pub(crate) struct Http2<T, H>
where T: AsyncRead + AsyncWrite + 'static, H: 'static
{
flags: Flags,
settings: Rc<WorkerSettings<H>>,
addr: Option<SocketAddr>,
state: State<IoWrapper<T>>,
tasks: VecDeque<Entry>,
keepalive_timer: Option<Timeout>,
}
enum State<T: AsyncRead + AsyncWrite> {
Handshake(Handshake<T, Bytes>),
Connection(Connection<T, Bytes>),
Empty,
}
impl<T, H> Http2<T, H>
where T: AsyncRead + AsyncWrite + 'static,
H: HttpHandler + 'static
{
pub fn new(h: Rc<WorkerSettings<H>>, io: T, addr: Option<SocketAddr>, buf: Bytes) -> Self
{
Http2{ flags: Flags::empty(),
settings: h,
addr: addr,
tasks: VecDeque::new(),
state: State::Handshake(
server::handshake(IoWrapper{unread: Some(buf), inner: io})),
keepalive_timer: None,
}
}
pub(crate) fn shutdown(&mut self) {
self.state = State::Empty;
self.tasks.clear();
self.keepalive_timer.take();
}
pub fn settings(&self) -> &WorkerSettings<H> {
self.settings.as_ref()
}
pub fn poll(&mut self) -> Poll<(), ()> {
// server
if let State::Connection(ref mut conn) = self.state {
// keep-alive timer
if let Some(ref mut timeout) = self.keepalive_timer {
match timeout.poll() {
Ok(Async::Ready(_)) => {
trace!("Keep-alive timeout, close connection");
return Ok(Async::Ready(()))
}
Ok(Async::NotReady) => (),
Err(_) => unreachable!(),
}
}
loop {
let mut not_ready = true;
// check in-flight connections
for item in &mut self.tasks {
// read payload
item.poll_payload();
if !item.flags.contains(EntryFlags::EOF) {
match item.task.poll_io(&mut item.stream) {
Ok(Async::Ready(ready)) => {
item.flags.insert(EntryFlags::EOF);
if ready {
item.flags.insert(EntryFlags::FINISHED);
}
not_ready = false;
},
Ok(Async::NotReady) => (),
Err(err) => {
error!("Unhandled error: {}", err);
item.flags.insert(EntryFlags::EOF);
item.flags.insert(EntryFlags::ERROR);
item.stream.reset(Reason::INTERNAL_ERROR);
}
}
} else if !item.flags.contains(EntryFlags::FINISHED) {
match item.task.poll() {
Ok(Async::NotReady) => (),
Ok(Async::Ready(_)) => {
not_ready = false;
item.flags.insert(EntryFlags::FINISHED);
},
Err(err) => {
item.flags.insert(EntryFlags::ERROR);
item.flags.insert(EntryFlags::FINISHED);
error!("Unhandled error: {}", err);
}
}
}
}
// cleanup finished tasks
while !self.tasks.is_empty() {
if self.tasks[0].flags.contains(EntryFlags::EOF) &&
self.tasks[0].flags.contains(EntryFlags::FINISHED) ||
self.tasks[0].flags.contains(EntryFlags::ERROR)
{
self.tasks.pop_front();
} else {
break
}
}
// get request
if !self.flags.contains(Flags::DISCONNECTED) {
match conn.poll() {
Ok(Async::Ready(None)) => {
not_ready = false;
self.flags.insert(Flags::DISCONNECTED);
for entry in &mut self.tasks {
entry.task.disconnected()
}
},
Ok(Async::Ready(Some((req, resp)))) => {
not_ready = false;
let (parts, body) = req.into_parts();
// stop keepalive timer
self.keepalive_timer.take();
self.tasks.push_back(
Entry::new(parts, body, resp, self.addr, &self.settings));
}
Ok(Async::NotReady) => {
// start keep-alive timer
if self.tasks.is_empty() {
if self.settings.keep_alive_enabled() {
let keep_alive = self.settings.keep_alive();
if keep_alive > 0 && self.keepalive_timer.is_none() {
trace!("Start keep-alive timer");
let mut timeout = Timeout::new(
Duration::new(keep_alive, 0),
Arbiter::handle()).unwrap();
// register timeout
let _ = timeout.poll();
self.keepalive_timer = Some(timeout);
}
} else {
// keep-alive disabled, drop connection
return conn.poll_close().map_err(
|e| error!("Error during connection close: {}", e))
}
} else {
// keep-alive unset, rely on operating system
return Ok(Async::NotReady)
}
}
Err(err) => {
trace!("Connection error: {}", err);
self.flags.insert(Flags::DISCONNECTED);
for entry in &mut self.tasks {
entry.task.disconnected()
}
self.keepalive_timer.take();
},
}
}
if not_ready {
if self.tasks.is_empty() && self.flags.contains(Flags::DISCONNECTED) {
return conn.poll_close().map_err(
|e| error!("Error during connection close: {}", e))
} else {
return Ok(Async::NotReady)
}
}
}
}
// handshake
self.state = if let State::Handshake(ref mut handshake) = self.state {
match handshake.poll() {
Ok(Async::Ready(conn)) => {
State::Connection(conn)
},
Ok(Async::NotReady) =>
return Ok(Async::NotReady),
Err(err) => {
trace!("Error handling connection: {}", err);
return Err(())
}
}
} else {
mem::replace(&mut self.state, State::Empty)
};
self.poll()
}
}
bitflags! {
struct EntryFlags: u8 {
const EOF = 0b0000_0001;
const REOF = 0b0000_0010;
const ERROR = 0b0000_0100;
const FINISHED = 0b0000_1000;
}
}
struct Entry {
task: Box<HttpHandlerTask>,
payload: PayloadType,
recv: RecvStream,
stream: H2Writer,
capacity: usize,
flags: EntryFlags,
}
impl Entry {
fn new<H>(parts: Parts,
recv: RecvStream,
resp: SendResponse<Bytes>,
addr: Option<SocketAddr>,
settings: &Rc<WorkerSettings<H>>) -> Entry
where H: HttpHandler + 'static
{
// Payload and Content-Encoding
let (psender, payload) = Payload::new(false);
let msg = settings.get_http_message();
msg.get_mut().uri = parts.uri;
msg.get_mut().method = parts.method;
msg.get_mut().version = parts.version;
msg.get_mut().headers = parts.headers;
msg.get_mut().payload = Some(payload);
msg.get_mut().addr = addr;
let mut req = HttpRequest::from_message(msg);
// Payload sender
let psender = PayloadType::new(req.headers(), psender);
// start request processing
let mut task = None;
for h in settings.handlers().iter_mut() {
req = match h.handle(req) {
Ok(t) => {
task = Some(t);
break
},
Err(req) => req,
}
}
Entry {task: task.unwrap_or_else(|| Pipeline::error(HTTPNotFound)),
payload: psender,
recv: recv,
stream: H2Writer::new(resp, settings.get_shared_bytes()),
flags: EntryFlags::empty(),
capacity: 0,
}
}
fn poll_payload(&mut self) {
if !self.flags.contains(EntryFlags::REOF) {
match self.recv.poll() {
Ok(Async::Ready(Some(chunk))) => {
self.payload.feed_data(chunk);
},
Ok(Async::Ready(None)) => {
self.flags.insert(EntryFlags::REOF);
},
Ok(Async::NotReady) => (),
Err(err) => {
self.payload.set_error(PayloadError::Http2(err))
}
}
let capacity = self.payload.capacity();
if self.capacity != capacity {
self.capacity = capacity;
if let Err(err) = self.recv.release_capacity().release_capacity(capacity) {
self.payload.set_error(PayloadError::Http2(err))
}
}
}
}
}
struct IoWrapper<T> {
unread: Option<Bytes>,
inner: T,
}
impl<T: Read> Read for IoWrapper<T> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if let Some(mut bytes) = self.unread.take() {
let size = cmp::min(buf.len(), bytes.len());
buf[..size].copy_from_slice(&bytes[..size]);
if bytes.len() > size {
bytes.split_to(size);
self.unread = Some(bytes);
}
Ok(size)
} else {
self.inner.read(buf)
}
}
}
impl<T: Write> Write for IoWrapper<T> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.inner.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
impl<T: AsyncRead + 'static> AsyncRead for IoWrapper<T> {
unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool {
self.inner.prepare_uninitialized_buffer(buf)
}
}
impl<T: AsyncWrite + 'static> AsyncWrite for IoWrapper<T> {
fn shutdown(&mut self) -> Poll<(), io::Error> {
self.inner.shutdown()
}
fn write_buf<B: Buf>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
self.inner.write_buf(buf)
}
}
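IoWrapper exists because the HTTP/1 to HTTP/2 switch in channel.rs hands over bytes the h1 parser has already read; those bytes are replayed before the inner socket is touched. A minimal sketch of that behaviour (module-internal, since the struct and its fields are private; Cursor stands in for a real socket):
use std::io::{Cursor, Read};
// The h1 parser already consumed the start of the HTTP/2 connection preface...
let mut io = IoWrapper {
    unread: Some(Bytes::from_static(b"PRI * HTTP/2.0\r\n")),
    inner: Cursor::new(b"\r\nSM\r\n\r\n".to_vec()),
};
let mut buf = [0u8; 16];
// The first read returns the buffered prefix, later reads hit the inner stream.
let n = io.read(&mut buf).unwrap();
assert_eq!(&buf[..n], b"PRI * HTTP/2.0\r\n");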

227
src/server/h2writer.rs Normal file

@@ -0,0 +1,227 @@
use std::{io, cmp};
use bytes::{Bytes, BytesMut};
use futures::{Async, Poll};
use http2::{Reason, SendStream};
use http2::server::SendResponse;
use http::{Version, HttpTryFrom, Response};
use http::header::{HeaderValue, CONNECTION, TRANSFER_ENCODING, DATE, CONTENT_LENGTH};
use helpers;
use body::Body;
use helpers::SharedBytes;
use encoding::PayloadEncoder;
use httprequest::HttpMessage;
use httpresponse::HttpResponse;
use server::{Writer, WriterState, MAX_WRITE_BUFFER_SIZE};
const CHUNK_SIZE: usize = 16_384;
bitflags! {
struct Flags: u8 {
const STARTED = 0b0000_0001;
const DISCONNECTED = 0b0000_0010;
const EOF = 0b0000_0100;
}
}
pub(crate) struct H2Writer {
respond: SendResponse<Bytes>,
stream: Option<SendStream<Bytes>>,
encoder: PayloadEncoder,
flags: Flags,
written: u64,
buffer: SharedBytes,
}
impl H2Writer {
pub fn new(respond: SendResponse<Bytes>, buf: SharedBytes) -> H2Writer {
H2Writer {
respond: respond,
stream: None,
encoder: PayloadEncoder::empty(buf.clone()),
flags: Flags::empty(),
written: 0,
buffer: buf,
}
}
pub fn reset(&mut self, reason: Reason) {
if let Some(mut stream) = self.stream.take() {
stream.send_reset(reason)
}
}
fn write_to_stream(&mut self) -> Result<WriterState, io::Error> {
if !self.flags.contains(Flags::STARTED) {
return Ok(WriterState::Done)
}
if let Some(ref mut stream) = self.stream {
let buffer = self.encoder.get_mut();
if buffer.is_empty() {
if self.flags.contains(Flags::EOF) {
let _ = stream.send_data(Bytes::new(), true);
}
return Ok(WriterState::Done)
}
loop {
match stream.poll_capacity() {
Ok(Async::NotReady) => {
if buffer.len() > MAX_WRITE_BUFFER_SIZE {
return Ok(WriterState::Pause)
} else {
return Ok(WriterState::Done)
}
}
Ok(Async::Ready(None)) => {
return Ok(WriterState::Done)
}
Ok(Async::Ready(Some(cap))) => {
let len = buffer.len();
let bytes = buffer.split_to(cmp::min(cap, len));
let eof = buffer.is_empty() && self.flags.contains(Flags::EOF);
self.written += bytes.len() as u64;
if let Err(err) = stream.send_data(bytes.freeze(), eof) {
return Err(io::Error::new(io::ErrorKind::Other, err))
} else if !buffer.is_empty() {
let cap = cmp::min(buffer.len(), CHUNK_SIZE);
stream.reserve_capacity(cap);
} else {
return Ok(WriterState::Pause)
}
}
Err(_) => {
return Err(io::Error::new(io::ErrorKind::Other, ""))
}
}
}
}
Ok(WriterState::Done)
}
}
impl Writer for H2Writer {
fn written(&self) -> u64 {
self.written
}
#[inline]
fn flush(&mut self) -> Poll<(), io::Error> {
Ok(Async::Ready(()))
}
fn start(&mut self, req: &mut HttpMessage, msg: &mut HttpResponse)
-> Result<WriterState, io::Error>
{
// trace!("Prepare response with status: {:?}", msg.status());
// prepare response
self.flags.insert(Flags::STARTED);
self.encoder = PayloadEncoder::new(self.buffer.clone(), req, msg);
if let Body::Empty = *msg.body() {
self.flags.insert(Flags::EOF);
}
// http2 specific
msg.headers_mut().remove(CONNECTION);
msg.headers_mut().remove(TRANSFER_ENCODING);
// using helpers::date is quite a lot faster
if !msg.headers().contains_key(DATE) {
let mut bytes = BytesMut::with_capacity(29);
helpers::date_value(&mut bytes);
msg.headers_mut().insert(DATE, HeaderValue::try_from(bytes.freeze()).unwrap());
}
let body = msg.replace_body(Body::Empty);
match body {
Body::Binary(ref bytes) => {
let mut val = BytesMut::new();
helpers::convert_usize(bytes.len(), &mut val);
let l = val.len();
msg.headers_mut().insert(
CONTENT_LENGTH, HeaderValue::try_from(val.split_to(l-2).freeze()).unwrap());
}
Body::Empty => {
msg.headers_mut().insert(CONTENT_LENGTH, HeaderValue::from_static("0"));
},
_ => (),
}
let mut resp = Response::new(());
*resp.status_mut() = msg.status();
*resp.version_mut() = Version::HTTP_2;
for (key, value) in msg.headers().iter() {
resp.headers_mut().insert(key, value.clone());
}
match self.respond.send_response(resp, self.flags.contains(Flags::EOF)) {
Ok(stream) =>
self.stream = Some(stream),
Err(_) =>
return Err(io::Error::new(io::ErrorKind::Other, "err")),
}
trace!("Response: {:?}", msg);
if let Body::Binary(bytes) = body {
self.flags.insert(Flags::EOF);
self.written = bytes.len() as u64;
self.encoder.write(bytes.as_ref())?;
if let Some(ref mut stream) = self.stream {
stream.reserve_capacity(cmp::min(self.encoder.len(), CHUNK_SIZE));
}
Ok(WriterState::Pause)
} else {
msg.replace_body(body);
Ok(WriterState::Done)
}
}
fn write(&mut self, payload: &[u8]) -> Result<WriterState, io::Error> {
self.written = payload.len() as u64;
if !self.flags.contains(Flags::DISCONNECTED) {
if self.flags.contains(Flags::STARTED) {
// TODO: add warning, write after EOF
self.encoder.write(payload)?;
} else {
// might be a response to EXPECT (100-continue)
self.encoder.get_mut().extend_from_slice(payload)
}
}
if self.encoder.len() > MAX_WRITE_BUFFER_SIZE {
Ok(WriterState::Pause)
} else {
Ok(WriterState::Done)
}
}
fn write_eof(&mut self) -> Result<WriterState, io::Error> {
self.encoder.write_eof()?;
self.flags.insert(Flags::EOF);
if !self.encoder.is_eof() {
Err(io::Error::new(io::ErrorKind::Other,
"Last payload item, but eof is not reached"))
} else if self.encoder.len() > MAX_WRITE_BUFFER_SIZE {
Ok(WriterState::Pause)
} else {
Ok(WriterState::Done)
}
}
fn poll_completed(&mut self, _shutdown: bool) -> Poll<(), io::Error> {
match self.write_to_stream() {
Ok(WriterState::Done) => Ok(Async::Ready(())),
Ok(WriterState::Pause) => Ok(Async::NotReady),
Err(err) => Err(err)
}
}
}
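The flush loop in write_to_stream() follows the h2 crate's flow-control handshake: reserve a window of at most CHUNK_SIZE, wait for poll_capacity() to grant part of it, then send_data() with at most the granted amount, setting end-of-stream once the buffer drains after write_eof(). Condensed to its essential shape (a sketch of the loop above, not additional behaviour):
// Condensed sketch of the capacity handshake used by write_to_stream().
stream.reserve_capacity(cmp::min(buffer.len(), CHUNK_SIZE)); // ask for a send window
match stream.poll_capacity() {
    Ok(Async::Ready(Some(cap))) => {
        // `cap` bytes were granted: send at most that much, flag EOF when
        // the buffer is drained and write_eof() has been called.
        let chunk = buffer.split_to(cmp::min(cap, buffer.len()));
        let eof = buffer.is_empty() && flags.contains(Flags::EOF);
        let _ = stream.send_data(chunk.freeze(), eof);
    }
    Ok(Async::NotReady) => { /* no window yet; decide between Pause and Done */ }
    Ok(Async::Ready(None)) => { /* stream closed by the peer */ }
    Err(_) => { /* connection-level error */ }
}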

109
src/server/mod.rs Normal file

@@ -0,0 +1,109 @@
//! Http server
use std::{time, io};
use std::net::Shutdown;
use futures::Poll;
use tokio_io::{AsyncRead, AsyncWrite};
mod srv;
mod worker;
mod channel;
mod h1;
mod h2;
mod h1writer;
mod h2writer;
mod settings;
pub use self::srv::HttpServer;
pub use self::settings::ServerSettings;
use error::Error;
use httprequest::{HttpMessage, HttpRequest};
use httpresponse::HttpResponse;
/// max buffer size 64k
pub(crate) const MAX_WRITE_BUFFER_SIZE: usize = 65_536;
/// Pause accepting incoming connections
///
/// If the socket contains pending connections, they might be dropped.
/// All already-opened connections remain active.
#[derive(Message)]
pub struct PauseServer;
/// Resume accepting incoming connections
#[derive(Message)]
pub struct ResumeServer;
/// Stop incoming connection processing, stop all workers and exit.
///
/// If the server was started with the `spawn()` method, then the spawned thread gets terminated.
#[derive(Message)]
pub struct StopServer {
pub graceful: bool
}
/// Low level http request handler
#[allow(unused_variables)]
pub trait HttpHandler: 'static {
/// Handle request
fn handle(&mut self, req: HttpRequest) -> Result<Box<HttpHandlerTask>, HttpRequest>;
}
pub trait HttpHandlerTask {
fn poll_io(&mut self, io: &mut Writer) -> Poll<bool, Error>;
fn poll(&mut self) -> Poll<(), Error>;
fn disconnected(&mut self);
}
/// Conversion helper trait
pub trait IntoHttpHandler {
/// The associated type which is result of conversion.
type Handler: HttpHandler;
/// Convert into `HttpHandler` object.
fn into_handler(self, settings: ServerSettings) -> Self::Handler;
}
impl<T: HttpHandler> IntoHttpHandler for T {
type Handler = T;
fn into_handler(self, _: ServerSettings) -> Self::Handler {
self
}
}
/// Low-level io stream operations
pub trait IoStream: AsyncRead + AsyncWrite + 'static {
fn shutdown(&mut self, how: Shutdown) -> io::Result<()>;
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()>;
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()>;
}
#[derive(Debug)]
pub enum WriterState {
Done,
Pause,
}
/// Stream writer
pub trait Writer {
fn written(&self) -> u64;
fn start(&mut self, req: &mut HttpMessage, resp: &mut HttpResponse)
-> Result<WriterState, io::Error>;
fn write(&mut self, payload: &[u8]) -> Result<WriterState, io::Error>;
fn write_eof(&mut self) -> Result<WriterState, io::Error>;
fn flush(&mut self) -> Poll<(), io::Error>;
fn poll_completed(&mut self, shutdown: bool) -> Poll<(), io::Error>;
}
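HttpHandler::handle either accepts a request (returning a task that the h1/h2 transports drive via HttpHandlerTask) or hands the request back so the next handler can try it; if every handler declines, the transports fall back to Pipeline::error(HTTPNotFound) (see Entry::new in src/server/h2.rs above). A minimal, hypothetical handler that declines everything:
// Hypothetical sketch: a low-level handler that declines every request,
// letting the server fall through to the next handler (or 404).
struct DeclineAll;

impl HttpHandler for DeclineAll {
    fn handle(&mut self, req: HttpRequest) -> Result<Box<HttpHandlerTask>, HttpRequest> {
        // Returning Err hands the (unmodified) request back to the caller.
        Err(req)
    }
}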

125
src/server/settings.rs Normal file

@@ -0,0 +1,125 @@
use std::net;
use std::rc::Rc;
use std::cell::{Cell, RefCell, RefMut};
use helpers;
use super::channel::Node;
/// Various server settings
#[derive(Debug, Clone)]
pub struct ServerSettings {
addr: Option<net::SocketAddr>,
secure: bool,
host: String,
}
impl Default for ServerSettings {
fn default() -> Self {
ServerSettings {
addr: None,
secure: false,
host: "localhost:8080".to_owned(),
}
}
}
impl ServerSettings {
/// Create server settings instance
pub(crate) fn new(addr: Option<net::SocketAddr>, host: &Option<String>, secure: bool)
-> ServerSettings
{
let host = if let Some(ref host) = *host {
host.clone()
} else if let Some(ref addr) = addr {
format!("{}", addr)
} else {
"localhost".to_owned()
};
ServerSettings {
addr: addr,
secure: secure,
host: host,
}
}
/// Returns the socket address of the local half of this TCP connection
pub fn local_addr(&self) -> Option<net::SocketAddr> {
self.addr
}
/// Returns true if connection is secure (https)
pub fn secure(&self) -> bool {
self.secure
}
/// Returns host header value
pub fn host(&self) -> &str {
&self.host
}
}
pub(crate) struct WorkerSettings<H> {
h: RefCell<Vec<H>>,
enabled: bool,
keep_alive: u64,
bytes: Rc<helpers::SharedBytesPool>,
messages: Rc<helpers::SharedMessagePool>,
channels: Cell<usize>,
node: Node<()>,
}
impl<H> WorkerSettings<H> {
pub(crate) fn new(h: Vec<H>, keep_alive: Option<u64>) -> WorkerSettings<H> {
WorkerSettings {
h: RefCell::new(h),
enabled: if let Some(ka) = keep_alive { ka > 0 } else { false },
keep_alive: keep_alive.unwrap_or(0),
bytes: Rc::new(helpers::SharedBytesPool::new()),
messages: Rc::new(helpers::SharedMessagePool::new()),
channels: Cell::new(0),
node: Node::head(),
}
}
pub fn num_channels(&self) -> usize {
self.channels.get()
}
pub fn head(&self) -> &Node<()> {
&self.node
}
pub fn handlers(&self) -> RefMut<Vec<H>> {
self.h.borrow_mut()
}
pub fn keep_alive(&self) -> u64 {
self.keep_alive
}
pub fn keep_alive_enabled(&self) -> bool {
self.enabled
}
pub fn get_shared_bytes(&self) -> helpers::SharedBytes {
helpers::SharedBytes::new(self.bytes.get_bytes(), Rc::clone(&self.bytes))
}
pub fn get_http_message(&self) -> helpers::SharedHttpMessage {
helpers::SharedHttpMessage::new(self.messages.get(), Rc::clone(&self.messages))
}
pub fn add_channel(&self) {
self.channels.set(self.channels.get() + 1);
}
pub fn remove_channel(&self) {
let num = self.channels.get();
if num > 0 {
self.channels.set(num-1);
} else {
error!("Number of removed channels is bigger than added channel. Bug in actix-web");
}
}
}
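The keep_alive argument encodes three cases, matching the HttpServer::keep_alive docs in src/server/srv.rs below: None leaves keep-alive to the socket's SO_KEEPALIVE option, Some(0) disables it, and Some(n) enables an n-second timeout. A small sketch of that mapping (crate-internal, since WorkerSettings is pub(crate); the unit handler type is only for illustration):
// Sketch of the keep-alive flag mapping done in WorkerSettings::new().
let default_ka = WorkerSettings::<()>::new(Vec::new(), None);    // rely on SO_KEEPALIVE
assert!(!default_ka.keep_alive_enabled());

let disabled = WorkerSettings::<()>::new(Vec::new(), Some(0));   // keep-alive off
assert!(!disabled.keep_alive_enabled());

let enabled = WorkerSettings::<()>::new(Vec::new(), Some(75));   // 75 second keep-alive
assert!(enabled.keep_alive_enabled() && enabled.keep_alive() == 75);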

756
src/server/srv.rs Normal file

@@ -0,0 +1,756 @@
use std::{io, net, thread};
use std::rc::Rc;
use std::sync::{Arc, mpsc as sync_mpsc};
use std::time::Duration;
use std::marker::PhantomData;
use std::collections::HashMap;
use actix::prelude::*;
use actix::actors::signal;
use futures::{Future, Sink, Stream};
use futures::sync::mpsc;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_core::net::TcpStream;
use mio;
use num_cpus;
use net2::TcpBuilder;
#[cfg(feature="tls")]
use native_tls::TlsAcceptor;
#[cfg(feature="tls")]
use tokio_tls::TlsStream;
#[cfg(feature="alpn")]
use openssl::ssl::{SslMethod, SslAcceptorBuilder};
#[cfg(feature="alpn")]
use openssl::pkcs12::ParsedPkcs12;
#[cfg(feature="alpn")]
use tokio_openssl::SslStream;
use helpers;
use super::{HttpHandler, IntoHttpHandler, IoStream};
use super::{PauseServer, ResumeServer, StopServer};
use super::channel::{HttpChannel, WrapperStream};
use super::worker::{Conn, Worker, StreamHandlerType, StopWorker};
use super::settings::{ServerSettings, WorkerSettings};
/// An HTTP Server
///
/// `T` - async stream, anything that implements `AsyncRead` + `AsyncWrite`.
///
/// `A` - peer address
///
/// `H` - request handler
pub struct HttpServer<T, A, H, U>
where H: HttpHandler + 'static
{
h: Option<Rc<WorkerSettings<H>>>,
io: PhantomData<T>,
addr: PhantomData<A>,
threads: usize,
backlog: i32,
host: Option<String>,
keep_alive: Option<u64>,
factory: Arc<Fn() -> U + Send + Sync>,
workers: Vec<SyncAddress<Worker<H>>>,
sockets: HashMap<net::SocketAddr, net::TcpListener>,
accept: Vec<(mio::SetReadiness, sync_mpsc::Sender<Command>)>,
exit: bool,
shutdown_timeout: u16,
signals: Option<SyncAddress<signal::ProcessSignals>>,
no_signals: bool,
}
unsafe impl<T, A, H, U> Sync for HttpServer<T, A, H, U> where H: HttpHandler + 'static {}
unsafe impl<T, A, H, U> Send for HttpServer<T, A, H, U> where H: HttpHandler + 'static {}
impl<T, A, H, U, V> Actor for HttpServer<T, A, H, U>
where A: 'static,
T: IoStream,
H: HttpHandler,
U: IntoIterator<Item=V> + 'static,
V: IntoHttpHandler<Handler=H>,
{
type Context = Context<Self>;
fn started(&mut self, ctx: &mut Self::Context) {
self.update_time(ctx);
}
}
impl<T, A, H, U, V> HttpServer<T, A, H, U>
where A: 'static,
T: IoStream,
H: HttpHandler,
U: IntoIterator<Item=V> + 'static,
V: IntoHttpHandler<Handler=H>,
{
/// Create new http server with application factory
pub fn new<F>(factory: F) -> Self
where F: Sync + Send + 'static + Fn() -> U,
{
HttpServer{ h: None,
io: PhantomData,
addr: PhantomData,
threads: num_cpus::get(),
backlog: 2048,
host: None,
keep_alive: None,
factory: Arc::new(factory),
workers: Vec::new(),
sockets: HashMap::new(),
accept: Vec::new(),
exit: false,
shutdown_timeout: 30,
signals: None,
no_signals: false,
}
}
fn update_time(&self, ctx: &mut Context<Self>) {
helpers::update_date();
ctx.run_later(Duration::new(1, 0), |slf, ctx| slf.update_time(ctx));
}
/// Set number of workers to start.
///
/// By default the http server uses the number of available logical CPUs as the thread count.
pub fn threads(mut self, num: usize) -> Self {
self.threads = num;
self
}
/// Set the maximum number of pending connections.
///
/// This refers to the number of clients that can be waiting to be served.
/// Exceeding this number results in the client getting an error when
/// attempting to connect. It should only affect servers under significant load.
///
/// Generally set in the 64-2048 range. Default value is 2048.
///
/// This method should be called before `bind()` method call.
pub fn backlog(mut self, num: i32) -> Self {
self.backlog = num;
self
}
/// Set server keep-alive setting.
///
/// By default keep alive is enabled.
///
/// - `Some(75)` - enable
///
/// - `Some(0)` - disable
///
/// - `None` - use `SO_KEEPALIVE` socket option
pub fn keep_alive(mut self, val: Option<u64>) -> Self {
self.keep_alive = val;
self
}
/// Set server host name.
///
/// Host name is used by the application router as a hostname for url generation.
/// Check [ConnectionInfo](./dev/struct.ConnectionInfo.html#method.host) documentation
/// for more information.
pub fn server_hostname(mut self, val: String) -> Self {
self.host = Some(val);
self
}
/// Send `SystemExit` message to actix system
///
/// `SystemExit` message stops currently running system arbiter and all
/// nested arbiters.
pub fn system_exit(mut self) -> Self {
self.exit = true;
self
}
/// Set alternative address for `ProcessSignals` actor.
pub fn signals(mut self, addr: SyncAddress<signal::ProcessSignals>) -> Self {
self.signals = Some(addr);
self
}
/// Disable signal handling
pub fn disable_signals(mut self) -> Self {
self.no_signals = true;
self
}
/// Timeout for graceful workers shutdown.
///
/// After receiving a stop signal, workers have this much time to finish serving requests.
/// Workers still alive after the timeout are force dropped.
///
/// By default the shutdown timeout is set to 30 seconds.
pub fn shutdown_timeout(mut self, sec: u16) -> Self {
self.shutdown_timeout = sec;
self
}
/// Get addresses of bound sockets.
pub fn addrs(&self) -> Vec<net::SocketAddr> {
self.sockets.keys().cloned().collect()
}
/// The socket address to bind
///
/// To bind multiple addresses this method can be called multiple times.
pub fn bind<S: net::ToSocketAddrs>(mut self, addr: S) -> io::Result<Self> {
let mut err = None;
let mut succ = false;
for addr in addr.to_socket_addrs()? {
match create_tcp_listener(addr, self.backlog) {
Ok(lst) => {
succ = true;
self.sockets.insert(lst.local_addr().unwrap(), lst);
},
Err(e) => err = Some(e),
}
}
if !succ {
if let Some(e) = err.take() {
Err(e)
} else {
Err(io::Error::new(io::ErrorKind::Other, "Can not bind to address."))
}
} else {
Ok(self)
}
}
fn start_workers(&mut self, settings: &ServerSettings, handler: &StreamHandlerType)
-> Vec<mpsc::UnboundedSender<Conn<net::TcpStream>>>
{
// start workers
let mut workers = Vec::new();
for _ in 0..self.threads {
let s = settings.clone();
let (tx, rx) = mpsc::unbounded::<Conn<net::TcpStream>>();
let h = handler.clone();
let ka = self.keep_alive;
let factory = Arc::clone(&self.factory);
let addr = Arbiter::start(move |ctx: &mut Context<_>| {
let apps: Vec<_> = (*factory)()
.into_iter()
.map(|h| h.into_handler(s.clone())).collect();
ctx.add_message_stream(rx);
Worker::new(apps, h, ka)
});
workers.push(tx);
self.workers.push(addr);
}
info!("Starting {} http workers", self.threads);
workers
}
// subscribe to os signals
fn subscribe_to_signals(&self) -> Option<SyncAddress<signal::ProcessSignals>> {
if !self.no_signals {
if let Some(ref signals) = self.signals {
Some(signals.clone())
} else {
Some(Arbiter::system_registry().get::<signal::ProcessSignals>())
}
} else {
None
}
}
}
impl<H: HttpHandler, U, V> HttpServer<TcpStream, net::SocketAddr, H, U>
where U: IntoIterator<Item=V> + 'static,
V: IntoHttpHandler<Handler=H>,
{
/// Start listening for incoming connections.
///
/// This method starts a number of http handler workers in separate threads.
/// For each address this method starts a separate thread which does `accept()` in a loop.
///
/// This method panics if no socket addresses get bound.
///
/// This method requires running within a properly configured `Actix` system.
///
/// ```rust
/// extern crate actix;
/// extern crate actix_web;
/// use actix_web::*;
///
/// fn main() {
/// let sys = actix::System::new("example"); // <- create Actix system
///
/// HttpServer::new(
/// || Application::new()
/// .resource("/", |r| r.h(httpcodes::HTTPOk)))
/// .bind("127.0.0.1:0").expect("Can not bind to 127.0.0.1:0")
/// .start();
/// # actix::Arbiter::system().send(actix::msgs::SystemExit(0));
///
/// let _ = sys.run(); // <- Run actix system, this method actually starts all async processes
/// }
/// ```
pub fn start(mut self) -> SyncAddress<Self>
{
if self.sockets.is_empty() {
panic!("HttpServer::bind() has to be called befor start()");
} else {
let addrs: Vec<(net::SocketAddr, net::TcpListener)> =
self.sockets.drain().collect();
let settings = ServerSettings::new(Some(addrs[0].0), &self.host, false);
let workers = self.start_workers(&settings, &StreamHandlerType::Normal);
// start acceptors threads
for (addr, sock) in addrs {
info!("Starting http server on {}", addr);
self.accept.push(
start_accept_thread(sock, addr, self.backlog, workers.clone()));
}
// start http server actor
let signals = self.subscribe_to_signals();
let addr: SyncAddress<_> = Actor::start(self);
signals.map(|signals| signals.send(signal::Subscribe(addr.subscriber())));
addr
}
}
/// Spawn a new thread and start listening for incoming connections.
///
/// This method spawns a new thread and starts a new actix system. Other than that it is
/// similar to the `start()` method. This method blocks.
///
/// This method panics if no socket addresses get bound.
///
/// ```rust,ignore
/// # extern crate futures;
/// # extern crate actix;
/// # extern crate actix_web;
/// # use futures::Future;
/// use actix_web::*;
///
/// fn main() {
/// HttpServer::new(
/// || Application::new()
/// .resource("/", |r| r.h(httpcodes::HTTPOk)))
/// .bind("127.0.0.1:0").expect("Can not bind to 127.0.0.1:0")
/// .run();
/// }
/// ```
pub fn run(mut self) {
self.exit = true;
self.no_signals = false;
let _ = thread::spawn(move || {
let sys = System::new("http-server");
self.start();
let _ = sys.run();
}).join();
}
}
#[cfg(feature="tls")]
impl<H: HttpHandler, U, V> HttpServer<TlsStream<TcpStream>, net::SocketAddr, H, U>
where U: IntoIterator<Item=V> + 'static,
V: IntoHttpHandler<Handler=H>,
{
/// Start listening for incoming tls connections.
pub fn start_tls(mut self, pkcs12: ::Pkcs12) -> io::Result<SyncAddress<Self>> {
if self.sockets.is_empty() {
Err(io::Error::new(io::ErrorKind::Other, "No socket addresses are bound"))
} else {
let addrs: Vec<(net::SocketAddr, net::TcpListener)> = self.sockets.drain().collect();
let settings = ServerSettings::new(Some(addrs[0].0), &self.host, false);
let acceptor = match TlsAcceptor::builder(pkcs12) {
Ok(builder) => {
match builder.build() {
Ok(acceptor) => acceptor,
Err(err) => return Err(io::Error::new(io::ErrorKind::Other, err))
}
}
Err(err) => return Err(io::Error::new(io::ErrorKind::Other, err))
};
let workers = self.start_workers(&settings, &StreamHandlerType::Tls(acceptor));
// start acceptors threads
for (addr, sock) in addrs {
info!("Starting tls http server on {}", addr);
self.accept.push(
start_accept_thread(sock, addr, self.backlog, workers.clone()));
}
// start http server actor
let signals = self.subscribe_to_signals();
let addr: SyncAddress<_> = Actor::start(self);
signals.map(|signals| signals.send(signal::Subscribe(addr.subscriber())));
Ok(addr)
}
}
}
#[cfg(feature="alpn")]
impl<H: HttpHandler, U, V> HttpServer<SslStream<TcpStream>, net::SocketAddr, H, U>
where U: IntoIterator<Item=V> + 'static,
V: IntoHttpHandler<Handler=H>,
{
/// Start listening for incoming tls connections.
///
/// This method sets alpn protocols to "h2" and "http/1.1"
pub fn start_ssl(mut self, identity: &ParsedPkcs12) -> io::Result<SyncAddress<Self>> {
if self.sockets.is_empty() {
Err(io::Error::new(io::ErrorKind::Other, "No socket addresses are bound"))
} else {
let addrs: Vec<(net::SocketAddr, net::TcpListener)> = self.sockets.drain().collect();
let settings = ServerSettings::new(Some(addrs[0].0), &self.host, false);
let acceptor = match SslAcceptorBuilder::mozilla_intermediate(
SslMethod::tls(), &identity.pkey, &identity.cert, &identity.chain)
{
Ok(mut builder) => {
match builder.set_alpn_protocols(&[b"h2", b"http/1.1"]) {
Ok(_) => builder.build(),
Err(err) => return Err(io::Error::new(io::ErrorKind::Other, err)),
}
},
Err(err) => return Err(io::Error::new(io::ErrorKind::Other, err))
};
let workers = self.start_workers(&settings, &StreamHandlerType::Alpn(acceptor));
// start acceptors threads
for (addr, sock) in addrs {
info!("Starting tls http server on {}", addr);
self.accept.push(
start_accept_thread(sock, addr, self.backlog, workers.clone()));
}
// start http server actor
let signals = self.subscribe_to_signals();
let addr: SyncAddress<_> = Actor::start(self);
signals.map(|signals| signals.send(signal::Subscribe(addr.subscriber())));
Ok(addr)
}
}
}
impl<T, A, H, U, V> HttpServer<WrapperStream<T>, A, H, U>
where A: 'static,
T: AsyncRead + AsyncWrite + 'static,
H: HttpHandler,
U: IntoIterator<Item=V> + 'static,
V: IntoHttpHandler<Handler=H>,
{
/// Start listening for incoming connections from a stream.
///
/// This method uses only one thread for handling incoming connections.
pub fn start_incoming<S>(mut self, stream: S, secure: bool) -> SyncAddress<Self>
where S: Stream<Item=(T, A), Error=io::Error> + 'static
{
if !self.sockets.is_empty() {
let addrs: Vec<(net::SocketAddr, net::TcpListener)> =
self.sockets.drain().collect();
let settings = ServerSettings::new(Some(addrs[0].0), &self.host, false);
let workers = self.start_workers(&settings, &StreamHandlerType::Normal);
// start acceptors threads
for (addr, sock) in addrs {
info!("Starting http server on {}", addr);
self.accept.push(
start_accept_thread(sock, addr, self.backlog, workers.clone()));
}
}
// set server settings
let addr: net::SocketAddr = "127.0.0.1:8080".parse().unwrap();
let settings = ServerSettings::new(Some(addr), &self.host, secure);
let apps: Vec<_> = (*self.factory)()
.into_iter()
.map(|h| h.into_handler(settings.clone())).collect();
self.h = Some(Rc::new(WorkerSettings::new(apps, self.keep_alive)));
// start server
let signals = self.subscribe_to_signals();
let addr: SyncAddress<_> = HttpServer::create(move |ctx| {
ctx.add_stream(stream.map(
move |(t, _)| Conn{io: WrapperStream::new(t), peer: None, http2: false}));
self
});
signals.map(|signals| signals.send(signal::Subscribe(addr.subscriber())));
addr
}
}
/// Signals support
/// Handle `SIGINT`, `SIGTERM`, `SIGQUIT` signals and send `SystemExit(0)`
/// message to `System` actor.
impl<T, A, H, U, V> Handler<signal::Signal> for HttpServer<T, A, H, U>
where T: IoStream,
H: HttpHandler + 'static,
U: IntoIterator<Item=V> + 'static,
V: IntoHttpHandler<Handler=H>,
A: 'static,
{
type Result = ();
fn handle(&mut self, msg: signal::Signal, ctx: &mut Context<Self>) {
match msg.0 {
signal::SignalType::Int => {
info!("SIGINT received, exiting");
self.exit = true;
Handler::<StopServer>::handle(self, StopServer{graceful: false}, ctx);
}
signal::SignalType::Term => {
info!("SIGTERM received, stopping");
self.exit = true;
Handler::<StopServer>::handle(self, StopServer{graceful: true}, ctx);
}
signal::SignalType::Quit => {
info!("SIGQUIT received, exiting");
self.exit = true;
Handler::<StopServer>::handle(self, StopServer{graceful: false}, ctx);
}
_ => (),
}
}
}
impl<T, A, H, U, V> Handler<io::Result<Conn<T>>> for HttpServer<T, A, H, U>
where T: IoStream,
H: HttpHandler + 'static,
U: IntoIterator<Item=V> + 'static,
V: IntoHttpHandler<Handler=H>,
A: 'static,
{
type Result = ();
fn handle(&mut self, msg: io::Result<Conn<T>>, _: &mut Context<Self>) -> Self::Result {
match msg {
Ok(msg) =>
Arbiter::handle().spawn(
HttpChannel::new(
Rc::clone(self.h.as_ref().unwrap()), msg.io, msg.peer, msg.http2)),
Err(err) =>
debug!("Error handling request: {}", err),
}
}
}
impl<T, A, H, U, V> Handler<PauseServer> for HttpServer<T, A, H, U>
where T: IoStream,
H: HttpHandler + 'static,
U: IntoIterator<Item=V> + 'static,
V: IntoHttpHandler<Handler=H>,
A: 'static,
{
type Result = ();
fn handle(&mut self, _: PauseServer, _: &mut Context<Self>)
{
for item in &self.accept {
let _ = item.1.send(Command::Pause);
let _ = item.0.set_readiness(mio::Ready::readable());
}
}
}
impl<T, A, H, U, V> Handler<ResumeServer> for HttpServer<T, A, H, U>
where T: IoStream,
H: HttpHandler + 'static,
U: IntoIterator<Item=V> + 'static,
V: IntoHttpHandler<Handler=H>,
A: 'static,
{
type Result = ();
fn handle(&mut self, _: ResumeServer, _: &mut Context<Self>) {
for item in &self.accept {
let _ = item.1.send(Command::Resume);
let _ = item.0.set_readiness(mio::Ready::readable());
}
}
}
impl<T, A, H, U, V> Handler<StopServer> for HttpServer<T, A, H, U>
where T: IoStream,
H: HttpHandler + 'static,
U: IntoIterator<Item=V> + 'static,
V: IntoHttpHandler<Handler=H>,
A: 'static,
{
type Result = actix::Response<Self, StopServer>;
fn handle(&mut self, msg: StopServer, ctx: &mut Context<Self>) -> Self::Result {
// stop accept threads
for item in &self.accept {
let _ = item.1.send(Command::Stop);
let _ = item.0.set_readiness(mio::Ready::readable());
}
// stop workers
let (tx, rx) = mpsc::channel(1);
let dur = if msg.graceful {
Some(Duration::new(u64::from(self.shutdown_timeout), 0))
} else {
None
};
for worker in &self.workers {
let tx2 = tx.clone();
let fut = worker.call(self, StopWorker{graceful: dur});
ActorFuture::then(fut, move |_, slf, _| {
slf.workers.pop();
if slf.workers.is_empty() {
let _ = tx2.send(());
// we need to stop system if server was spawned
if slf.exit {
Arbiter::system().send(actix::msgs::SystemExit(0))
}
}
actix::fut::ok(())
}).spawn(ctx);
}
if !self.workers.is_empty() {
Self::async_reply(
rx.into_future().map(|_| ()).map_err(|_| ()).actfuture())
} else {
// we need to stop system if server was spawned
if self.exit {
Arbiter::system().send(actix::msgs::SystemExit(0))
}
Self::reply(Ok(()))
}
}
}
enum Command {
Pause,
Resume,
Stop,
}
fn start_accept_thread(sock: net::TcpListener, addr: net::SocketAddr, backlog: i32,
workers: Vec<mpsc::UnboundedSender<Conn<net::TcpStream>>>)
-> (mio::SetReadiness, sync_mpsc::Sender<Command>)
{
let (tx, rx) = sync_mpsc::channel();
let (reg, readiness) = mio::Registration::new2();
// start accept thread
let _ = thread::Builder::new().name(format!("Accept on {}", addr)).spawn(move || {
const SRV: mio::Token = mio::Token(0);
const CMD: mio::Token = mio::Token(1);
let mut server = Some(
mio::net::TcpListener::from_listener(sock, &addr)
.expect("Can not create mio::net::TcpListener"));
// Create a poll instance
let poll = match mio::Poll::new() {
Ok(poll) => poll,
Err(err) => panic!("Can not create mio::Poll: {}", err),
};
// Start listening for incoming connections
if let Some(ref srv) = server {
if let Err(err) = poll.register(
srv, SRV, mio::Ready::readable(), mio::PollOpt::edge()) {
panic!("Can not register io: {}", err);
}
}
// Start listening for incoming commands
if let Err(err) = poll.register(&reg, CMD,
mio::Ready::readable(), mio::PollOpt::edge()) {
panic!("Can not register Registration: {}", err);
}
// Create storage for events
let mut events = mio::Events::with_capacity(128);
let mut next = 0;
loop {
if let Err(err) = poll.poll(&mut events, None) {
panic!("Poll error: {}", err);
}
for event in events.iter() {
match event.token() {
SRV => {
if let Some(ref server) = server {
loop {
match server.accept_std() {
Ok((sock, addr)) => {
let msg = Conn{
io: sock, peer: Some(addr), http2: false};
workers[next].unbounded_send(msg)
.expect("worker thread died");
next = (next + 1) % workers.len();
},
Err(err) => if err.kind() == io::ErrorKind::WouldBlock {
break
} else {
error!("Error accepting connection: {:?}", err);
return
}
}
}
}
},
CMD => match rx.try_recv() {
Ok(cmd) => match cmd {
Command::Pause => if let Some(server) = server.take() {
if let Err(err) = poll.deregister(&server) {
error!("Can not deregister server socket {}", err);
} else {
info!("Paused accepting connections on {}", addr);
}
},
Command::Resume => {
let lst = create_tcp_listener(addr, backlog)
.expect("Can not create net::TcpListener");
server = Some(
mio::net::TcpListener::from_listener(lst, &addr)
.expect("Can not create mio::net::TcpListener"));
if let Some(ref server) = server {
if let Err(err) = poll.register(
server, SRV, mio::Ready::readable(), mio::PollOpt::edge())
{
error!("Can not resume socket accept process: {}", err);
} else {
info!("Accepting connections on {} has been resumed",
addr);
}
}
},
Command::Stop => return,
},
Err(err) => match err {
sync_mpsc::TryRecvError::Empty => (),
sync_mpsc::TryRecvError::Disconnected => return,
}
},
_ => unreachable!(),
}
}
}
});
(readiness, tx)
}
fn create_tcp_listener(addr: net::SocketAddr, backlog: i32) -> io::Result<net::TcpListener> {
let builder = match addr {
net::SocketAddr::V4(_) => TcpBuilder::new_v4()?,
net::SocketAddr::V6(_) => TcpBuilder::new_v6()?,
};
builder.bind(addr)?;
builder.reuse_address(true)?;
Ok(builder.listen(backlog)?)
}
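The builder methods above compose in the usual way; a sketch of a fuller configuration chain, using the same API as the start() doc-example (values are illustrative, and as there this needs a running actix System):
// Illustrative sketch: combining the builder options defined above.
HttpServer::new(
    || Application::new()
        .resource("/", |r| r.h(httpcodes::HTTPOk)))
    .threads(4)                                 // number of worker arbiters
    .backlog(1024)                              // listen(2) backlog, set before bind()
    .keep_alive(Some(75))                       // enable 75s keep-alive
    .server_hostname("example.com".to_owned())  // used for url generation
    .shutdown_timeout(10)                       // grace period for StopServer { graceful: true }
    .bind("127.0.0.1:8080").expect("Can not bind to 127.0.0.1:8080")
    .start();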

210
src/server/worker.rs Normal file

@@ -0,0 +1,210 @@
use std::{net, time};
use std::rc::Rc;
use futures::Future;
use futures::unsync::oneshot;
use tokio_core::net::TcpStream;
use tokio_core::reactor::Handle;
use net2::TcpStreamExt;
#[cfg(feature="tls")]
use futures::future;
#[cfg(feature="tls")]
use native_tls::TlsAcceptor;
#[cfg(feature="tls")]
use tokio_tls::TlsAcceptorExt;
#[cfg(feature="alpn")]
use futures::future;
#[cfg(feature="alpn")]
use openssl::ssl::SslAcceptor;
#[cfg(feature="alpn")]
use tokio_openssl::SslAcceptorExt;
use actix::*;
use actix::msgs::StopArbiter;
use helpers;
use server::HttpHandler;
use server::channel::HttpChannel;
use server::settings::WorkerSettings;
#[derive(Message)]
pub(crate) struct Conn<T> {
pub io: T,
pub peer: Option<net::SocketAddr>,
pub http2: bool,
}
/// Stop worker message. Returns `true` on successful shutdown
/// and `false` if some connections are still alive.
#[derive(Message)]
#[rtype(bool)]
pub(crate) struct StopWorker {
pub graceful: Option<time::Duration>,
}
/// Http worker
///
/// Worker accepts socket objects via an unbounded channel and starts request processing.
pub(crate) struct Worker<H> where H: HttpHandler + 'static {
settings: Rc<WorkerSettings<H>>,
hnd: Handle,
handler: StreamHandlerType,
}
impl<H: HttpHandler + 'static> Worker<H> {
pub(crate) fn new(h: Vec<H>, handler: StreamHandlerType, keep_alive: Option<u64>)
-> Worker<H>
{
Worker {
settings: Rc::new(WorkerSettings::new(h, keep_alive)),
hnd: Arbiter::handle().clone(),
handler: handler,
}
}
fn update_time(&self, ctx: &mut Context<Self>) {
helpers::update_date();
ctx.run_later(time::Duration::new(1, 0), |slf, ctx| slf.update_time(ctx));
}
fn shutdown_timeout(&self, ctx: &mut Context<Self>,
tx: oneshot::Sender<bool>, dur: time::Duration) {
// sleep for 1 second and then check again
ctx.run_later(time::Duration::new(1, 0), move |slf, ctx| {
let num = slf.settings.num_channels();
if num == 0 {
let _ = tx.send(true);
Arbiter::arbiter().send(StopArbiter(0));
} else if let Some(d) = dur.checked_sub(time::Duration::new(1, 0)) {
slf.shutdown_timeout(ctx, tx, d);
} else {
info!("Force shutdown http worker, {} connections", num);
slf.settings.head().traverse::<TcpStream, H>();
let _ = tx.send(false);
Arbiter::arbiter().send(StopArbiter(0));
}
});
}
}
impl<H: 'static> Actor for Worker<H> where H: HttpHandler + 'static {
type Context = Context<Self>;
fn started(&mut self, ctx: &mut Self::Context) {
self.update_time(ctx);
}
}
impl<H> Handler<Conn<net::TcpStream>> for Worker<H>
where H: HttpHandler + 'static,
{
type Result = ();
fn handle(&mut self, msg: Conn<net::TcpStream>, _: &mut Context<Self>)
{
if !self.settings.keep_alive_enabled() &&
msg.io.set_keepalive(Some(time::Duration::new(75, 0))).is_err()
{
error!("Can not set socket keep-alive option");
}
self.handler.handle(Rc::clone(&self.settings), &self.hnd, msg);
}
}
/// `StopWorker` message handler
impl<H> Handler<StopWorker> for Worker<H>
where H: HttpHandler + 'static,
{
type Result = Response<Self, StopWorker>;
fn handle(&mut self, msg: StopWorker, ctx: &mut Context<Self>) -> Self::Result {
let num = self.settings.num_channels();
if num == 0 {
info!("Shutting down http worker, 0 connections");
Self::reply(Ok(true))
} else if let Some(dur) = msg.graceful {
info!("Graceful http worker shutdown, {} connections", num);
let (tx, rx) = oneshot::channel();
self.shutdown_timeout(ctx, tx, dur);
Self::async_reply(rx.map_err(|_| ()).actfuture())
} else {
info!("Force shutdown http worker, {} connections", num);
self.settings.head().traverse::<TcpStream, H>();
Self::reply(Ok(false))
}
}
}
#[derive(Clone)]
pub(crate) enum StreamHandlerType {
Normal,
#[cfg(feature="tls")]
Tls(TlsAcceptor),
#[cfg(feature="alpn")]
Alpn(SslAcceptor),
}
impl StreamHandlerType {
fn handle<H: HttpHandler>(&mut self,
h: Rc<WorkerSettings<H>>,
hnd: &Handle, msg: Conn<net::TcpStream>) {
match *self {
StreamHandlerType::Normal => {
let _ = msg.io.set_nodelay(true);
let io = TcpStream::from_stream(msg.io, hnd)
.expect("failed to associate TCP stream");
hnd.spawn(HttpChannel::new(h, io, msg.peer, msg.http2));
}
#[cfg(feature="tls")]
StreamHandlerType::Tls(ref acceptor) => {
let Conn { io, peer, http2 } = msg;
let _ = io.set_nodelay(true);
let io = TcpStream::from_stream(io, hnd)
.expect("failed to associate TCP stream");
hnd.spawn(
TlsAcceptorExt::accept_async(acceptor, io).then(move |res| {
match res {
Ok(io) => Arbiter::handle().spawn(
HttpChannel::new(h, io, peer, http2)),
Err(err) =>
trace!("Error during handling tls connection: {}", err),
};
future::result(Ok(()))
})
);
}
#[cfg(feature="alpn")]
StreamHandlerType::Alpn(ref acceptor) => {
let Conn { io, peer, .. } = msg;
let _ = io.set_nodelay(true);
let io = TcpStream::from_stream(io, hnd)
.expect("failed to associate TCP stream");
hnd.spawn(
SslAcceptorExt::accept_async(acceptor, io).then(move |res| {
match res {
Ok(io) => {
let http2 = if let Some(p) = io.get_ref().ssl().selected_alpn_protocol()
{
p.len() == 2 && &p == b"h2"
} else {
false
};
Arbiter::handle().spawn(HttpChannel::new(h, io, peer, http2));
},
Err(err) =>
trace!("Error during handling tls connection: {}", err),
};
future::result(Ok(()))
})
);
}
}
}
}
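Shutdown ties the pieces together: the HttpServer's StopServer handler (src/server/srv.rs above) sends each worker StopWorker { graceful: Some(shutdown_timeout) }, and the worker then re-checks num_channels() once per second until either all connections finish or the timeout runs out, at which point it force-closes them via Node::traverse. A sketch of triggering that from another actor (`addr` is assumed to be the SyncAddress returned by HttpServer::start()):
// Sketch: requesting a graceful shutdown of a running server.
// `addr: SyncAddress<HttpServer<_, _, _, _>>` is assumed to be in scope.
addr.send(StopServer { graceful: true });
// Workers answer StopWorker with `true` if they drained all connections in time
// and `false` if the remaining connections had to be force-dropped.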