Mirror of https://github.com/actix/actix-extras.git (synced 2025-06-30 20:04:26 +02:00)

add rustfmt config

Author: Nikolay Kim
Date: 2018-04-13 16:02:01 -07:00
parent 95f6277007
commit 113f5ad1a8
91 changed files with 8057 additions and 5509 deletions
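
The rustfmt.toml that this commit adds is not visible in the hunks below, only its effect on the code. Judging from that output (use statements regrouped and sorted alphabetically, lines wrapped just under 90 columns, where-clauses broken onto their own lines, spaces normalized inside cfg attributes), a configuration along the following lines would produce a similar style. Every key and value here is a guess reconstructed from the diff, not the committed file:

    # hypothetical rustfmt.toml, reconstructed from the formatting seen in this diff
    max_width = 89                  # new code wraps just short of 90 columns
    reorder_imports = true          # `use` statements are sorted alphabetically
    fn_args_density = "Compressed"  # multi-argument signatures share lines where they fit

Only the reformatted output in the hunks below is authoritative for what the actual config produced.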

View File

@ -1,17 +1,16 @@
use std::{ptr, mem, time, io};
use std::net::{Shutdown, SocketAddr};
use std::rc::Rc;
use std::net::{SocketAddr, Shutdown};
use std::{io, mem, ptr, time};
use bytes::{Bytes, BytesMut, Buf, BufMut};
use futures::{Future, Poll, Async};
use bytes::{Buf, BufMut, Bytes, BytesMut};
use futures::{Async, Future, Poll};
use tokio_io::{AsyncRead, AsyncWrite};
use super::{h1, h2, utils, HttpHandler, IoStream};
use super::settings::WorkerSettings;
use super::{utils, HttpHandler, IoStream, h1, h2};
const HTTP2_PREFACE: [u8; 14] = *b"PRI * HTTP/2.0";
enum HttpProtocol<T: IoStream, H: 'static> {
H1(h1::Http1<T, H>),
H2(h2::Http2<T, H>),
@ -24,27 +23,47 @@ enum ProtocolKind {
}
#[doc(hidden)]
pub struct HttpChannel<T, H> where T: IoStream, H: HttpHandler + 'static {
pub struct HttpChannel<T, H>
where
T: IoStream,
H: HttpHandler + 'static,
{
proto: Option<HttpProtocol<T, H>>,
node: Option<Node<HttpChannel<T, H>>>,
}
impl<T, H> HttpChannel<T, H> where T: IoStream, H: HttpHandler + 'static
impl<T, H> HttpChannel<T, H>
where
T: IoStream,
H: HttpHandler + 'static,
{
pub(crate) fn new(settings: Rc<WorkerSettings<H>>,
mut io: T, peer: Option<SocketAddr>, http2: bool) -> HttpChannel<T, H>
{
pub(crate) fn new(
settings: Rc<WorkerSettings<H>>, mut io: T, peer: Option<SocketAddr>,
http2: bool,
) -> HttpChannel<T, H> {
settings.add_channel();
let _ = io.set_nodelay(true);
if http2 {
HttpChannel {
node: None, proto: Some(HttpProtocol::H2(
h2::Http2::new(settings, io, peer, Bytes::new()))) }
node: None,
proto: Some(HttpProtocol::H2(h2::Http2::new(
settings,
io,
peer,
Bytes::new(),
))),
}
} else {
HttpChannel {
node: None, proto: Some(HttpProtocol::Unknown(
settings, peer, io, BytesMut::with_capacity(4096))) }
node: None,
proto: Some(HttpProtocol::Unknown(
settings,
peer,
io,
BytesMut::with_capacity(4096),
)),
}
}
}
@ -55,15 +74,16 @@ impl<T, H> HttpChannel<T, H> where T: IoStream, H: HttpHandler + 'static
let _ = IoStream::set_linger(io, Some(time::Duration::new(0, 0)));
let _ = IoStream::shutdown(io, Shutdown::Both);
}
Some(HttpProtocol::H2(ref mut h2)) => {
h2.shutdown()
}
Some(HttpProtocol::H2(ref mut h2)) => h2.shutdown(),
_ => (),
}
}
}
impl<T, H> Future for HttpChannel<T, H> where T: IoStream, H: HttpHandler + 'static
impl<T, H> Future for HttpChannel<T, H>
where
T: IoStream,
H: HttpHandler + 'static,
{
type Item = ();
type Error = ();
@ -73,12 +93,15 @@ impl<T, H> Future for HttpChannel<T, H> where T: IoStream, H: HttpHandler + 'sta
let el = self as *mut _;
self.node = Some(Node::new(el));
let _ = match self.proto {
Some(HttpProtocol::H1(ref mut h1)) =>
self.node.as_ref().map(|n| h1.settings().head().insert(n)),
Some(HttpProtocol::H2(ref mut h2)) =>
self.node.as_ref().map(|n| h2.settings().head().insert(n)),
Some(HttpProtocol::Unknown(ref mut settings, _, _, _)) =>
self.node.as_ref().map(|n| settings.head().insert(n)),
Some(HttpProtocol::H1(ref mut h1)) => self.node
.as_ref()
.map(|n| h1.settings().head().insert(n)),
Some(HttpProtocol::H2(ref mut h2)) => self.node
.as_ref()
.map(|n| h2.settings().head().insert(n)),
Some(HttpProtocol::Unknown(ref mut settings, _, _, _)) => {
self.node.as_ref().map(|n| settings.head().insert(n))
}
None => unreachable!(),
};
}
@ -90,30 +113,35 @@ impl<T, H> Future for HttpChannel<T, H> where T: IoStream, H: HttpHandler + 'sta
Ok(Async::Ready(())) | Err(_) => {
h1.settings().remove_channel();
self.node.as_mut().map(|n| n.remove());
},
}
_ => (),
}
return result
},
return result;
}
Some(HttpProtocol::H2(ref mut h2)) => {
let result = h2.poll();
match result {
Ok(Async::Ready(())) | Err(_) => {
h2.settings().remove_channel();
self.node.as_mut().map(|n| n.remove());
},
}
_ => (),
}
return result
},
Some(HttpProtocol::Unknown(ref mut settings, _, ref mut io, ref mut buf)) => {
return result;
}
Some(HttpProtocol::Unknown(
ref mut settings,
_,
ref mut io,
ref mut buf,
)) => {
match utils::read_from_io(io, buf) {
Ok(Async::Ready(0)) | Err(_) => {
debug!("Ignored premature client disconnection");
settings.remove_channel();
self.node.as_mut().map(|n| n.remove());
return Err(())
},
return Err(());
}
_ => (),
}
@ -126,7 +154,7 @@ impl<T, H> Future for HttpChannel<T, H> where T: IoStream, H: HttpHandler + 'sta
} else {
return Ok(Async::NotReady);
}
},
}
None => unreachable!(),
};
@ -134,30 +162,36 @@ impl<T, H> Future for HttpChannel<T, H> where T: IoStream, H: HttpHandler + 'sta
if let Some(HttpProtocol::Unknown(settings, addr, io, buf)) = self.proto.take() {
match kind {
ProtocolKind::Http1 => {
self.proto = Some(
HttpProtocol::H1(h1::Http1::new(settings, io, addr, buf)));
return self.poll()
},
self.proto = Some(HttpProtocol::H1(h1::Http1::new(
settings,
io,
addr,
buf,
)));
return self.poll();
}
ProtocolKind::Http2 => {
self.proto = Some(
HttpProtocol::H2(h2::Http2::new(settings, io, addr, buf.freeze())));
return self.poll()
},
self.proto = Some(HttpProtocol::H2(h2::Http2::new(
settings,
io,
addr,
buf.freeze(),
)));
return self.poll();
}
}
}
unreachable!()
}
}
pub(crate) struct Node<T>
{
pub(crate) struct Node<T> {
next: Option<*mut Node<()>>,
prev: Option<*mut Node<()>>,
element: *mut T,
}
impl<T> Node<T>
{
impl<T> Node<T> {
fn new(el: *mut T) -> Self {
Node {
next: None,
@ -194,9 +228,7 @@ impl<T> Node<T>
}
}
impl Node<()> {
pub(crate) fn head() -> Self {
Node {
next: None,
@ -205,7 +237,11 @@ impl Node<()> {
}
}
pub(crate) fn traverse<T, H>(&self) where T: IoStream, H: HttpHandler + 'static {
pub(crate) fn traverse<T, H>(&self)
where
T: IoStream,
H: HttpHandler + 'static,
{
let mut next = self.next.as_ref();
loop {
if let Some(n) = next {
@ -214,30 +250,39 @@ impl Node<()> {
next = n.next.as_ref();
if !n.element.is_null() {
let ch: &mut HttpChannel<T, H> = mem::transmute(
&mut *(n.element as *mut _));
let ch: &mut HttpChannel<T, H> =
mem::transmute(&mut *(n.element as *mut _));
ch.shutdown();
}
}
} else {
return
return;
}
}
}
}
/// Wrapper for `AsyncRead + AsyncWrite` types
pub(crate) struct WrapperStream<T> where T: AsyncRead + AsyncWrite + 'static {
io: T,
pub(crate) struct WrapperStream<T>
where
T: AsyncRead + AsyncWrite + 'static,
{
io: T,
}
impl<T> WrapperStream<T> where T: AsyncRead + AsyncWrite + 'static {
impl<T> WrapperStream<T>
where
T: AsyncRead + AsyncWrite + 'static,
{
pub fn new(io: T) -> Self {
WrapperStream{ io }
WrapperStream { io }
}
}
impl<T> IoStream for WrapperStream<T> where T: AsyncRead + AsyncWrite + 'static {
impl<T> IoStream for WrapperStream<T>
where
T: AsyncRead + AsyncWrite + 'static,
{
#[inline]
fn shutdown(&mut self, _: Shutdown) -> io::Result<()> {
Ok(())
@ -252,14 +297,20 @@ impl<T> IoStream for WrapperStream<T> where T: AsyncRead + AsyncWrite + 'static
}
}
impl<T> io::Read for WrapperStream<T> where T: AsyncRead + AsyncWrite + 'static {
impl<T> io::Read for WrapperStream<T>
where
T: AsyncRead + AsyncWrite + 'static,
{
#[inline]
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.io.read(buf)
}
}
impl<T> io::Write for WrapperStream<T> where T: AsyncRead + AsyncWrite + 'static {
impl<T> io::Write for WrapperStream<T>
where
T: AsyncRead + AsyncWrite + 'static,
{
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.io.write(buf)
@ -270,14 +321,20 @@ impl<T> io::Write for WrapperStream<T> where T: AsyncRead + AsyncWrite + 'static
}
}
impl<T> AsyncRead for WrapperStream<T> where T: AsyncRead + AsyncWrite + 'static {
impl<T> AsyncRead for WrapperStream<T>
where
T: AsyncRead + AsyncWrite + 'static,
{
#[inline]
fn read_buf<B: BufMut>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
self.io.read_buf(buf)
}
}
impl<T> AsyncWrite for WrapperStream<T> where T: AsyncRead + AsyncWrite + 'static {
impl<T> AsyncWrite for WrapperStream<T>
where
T: AsyncRead + AsyncWrite + 'static,
{
#[inline]
fn shutdown(&mut self) -> Poll<(), io::Error> {
self.io.shutdown()

View File

@ -1,25 +1,24 @@
use std::{io, cmp, mem};
use std::io::{Read, Write};
use std::fmt::Write as FmtWrite;
use std::io::{Read, Write};
use std::str::FromStr;
use std::{cmp, io, mem};
use bytes::{Bytes, BytesMut, BufMut};
use http::{Version, Method, HttpTryFrom};
use http::header::{HeaderMap, HeaderValue,
ACCEPT_ENCODING, CONNECTION,
CONTENT_ENCODING, CONTENT_LENGTH, TRANSFER_ENCODING};
#[cfg(feature = "brotli")]
use brotli2::write::{BrotliDecoder, BrotliEncoder};
use bytes::{BufMut, Bytes, BytesMut};
use flate2::Compression;
use flate2::read::GzDecoder;
use flate2::write::{GzEncoder, DeflateDecoder, DeflateEncoder};
#[cfg(feature="brotli")]
use brotli2::write::{BrotliDecoder, BrotliEncoder};
use flate2::write::{DeflateDecoder, DeflateEncoder, GzEncoder};
use http::header::{HeaderMap, HeaderValue, ACCEPT_ENCODING, CONNECTION,
CONTENT_ENCODING, CONTENT_LENGTH, TRANSFER_ENCODING};
use http::{HttpTryFrom, Method, Version};
use header::ContentEncoding;
use body::{Body, Binary};
use body::{Binary, Body};
use error::PayloadError;
use header::ContentEncoding;
use httprequest::HttpInnerMessage;
use httpresponse::HttpResponse;
use payload::{PayloadSender, PayloadWriter, PayloadStatus};
use payload::{PayloadSender, PayloadStatus, PayloadWriter};
use super::shared::SharedBytes;
@ -29,7 +28,6 @@ pub(crate) enum PayloadType {
}
impl PayloadType {
pub fn new(headers: &HeaderMap, sender: PayloadSender) -> PayloadType {
// check content-encoding
let enc = if let Some(enc) = headers.get(CONTENT_ENCODING) {
@ -43,8 +41,9 @@ impl PayloadType {
};
match enc {
ContentEncoding::Auto | ContentEncoding::Identity =>
PayloadType::Sender(sender),
ContentEncoding::Auto | ContentEncoding::Identity => {
PayloadType::Sender(sender)
}
_ => PayloadType::Encoding(Box::new(EncodedPayload::new(sender, enc))),
}
}
@ -84,7 +83,6 @@ impl PayloadWriter for PayloadType {
}
}
/// Payload wrapper with content decompression support
pub(crate) struct EncodedPayload {
inner: PayloadSender,
@ -94,12 +92,15 @@ pub(crate) struct EncodedPayload {
impl EncodedPayload {
pub fn new(inner: PayloadSender, enc: ContentEncoding) -> EncodedPayload {
EncodedPayload{ inner, error: false, payload: PayloadStream::new(enc) }
EncodedPayload {
inner,
error: false,
payload: PayloadStream::new(enc),
}
}
}
impl PayloadWriter for EncodedPayload {
fn set_error(&mut self, err: PayloadError) {
self.inner.set_error(err)
}
@ -110,7 +111,7 @@ impl PayloadWriter for EncodedPayload {
Err(err) => {
self.error = true;
self.set_error(PayloadError::Io(err));
},
}
Ok(value) => {
if let Some(b) = value {
self.inner.feed_data(b);
@ -123,7 +124,7 @@ impl PayloadWriter for EncodedPayload {
fn feed_data(&mut self, data: Bytes) {
if self.error {
return
return;
}
match self.payload.feed_data(data) {
@ -145,7 +146,7 @@ impl PayloadWriter for EncodedPayload {
pub(crate) enum Decoder {
Deflate(Box<DeflateDecoder<Writer>>),
Gzip(Option<Box<GzDecoder<Wrapper>>>),
#[cfg(feature="brotli")]
#[cfg(feature = "brotli")]
Br(Box<BrotliDecoder<Writer>>),
Identity,
}
@ -190,7 +191,9 @@ pub(crate) struct Writer {
impl Writer {
fn new() -> Writer {
Writer{buf: BytesMut::with_capacity(8192)}
Writer {
buf: BytesMut::with_capacity(8192),
}
}
fn take(&mut self) -> Bytes {
self.buf.take().freeze()
@ -216,65 +219,64 @@ pub(crate) struct PayloadStream {
impl PayloadStream {
pub fn new(enc: ContentEncoding) -> PayloadStream {
let dec = match enc {
#[cfg(feature="brotli")]
ContentEncoding::Br => Decoder::Br(
Box::new(BrotliDecoder::new(Writer::new()))),
ContentEncoding::Deflate => Decoder::Deflate(
Box::new(DeflateDecoder::new(Writer::new()))),
#[cfg(feature = "brotli")]
ContentEncoding::Br => {
Decoder::Br(Box::new(BrotliDecoder::new(Writer::new())))
}
ContentEncoding::Deflate => {
Decoder::Deflate(Box::new(DeflateDecoder::new(Writer::new())))
}
ContentEncoding::Gzip => Decoder::Gzip(None),
_ => Decoder::Identity,
};
PayloadStream{ decoder: dec, dst: BytesMut::new() }
PayloadStream {
decoder: dec,
dst: BytesMut::new(),
}
}
}
impl PayloadStream {
pub fn feed_eof(&mut self) -> io::Result<Option<Bytes>> {
match self.decoder {
#[cfg(feature="brotli")]
Decoder::Br(ref mut decoder) => {
match decoder.finish() {
Ok(mut writer) => {
let b = writer.take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
},
Err(e) => Err(e),
#[cfg(feature = "brotli")]
Decoder::Br(ref mut decoder) => match decoder.finish() {
Ok(mut writer) => {
let b = writer.take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
Decoder::Gzip(ref mut decoder) => {
if let Some(ref mut decoder) = *decoder {
decoder.as_mut().get_mut().eof = true;
self.dst.reserve(8192);
match decoder.read(unsafe{self.dst.bytes_mut()}) {
Ok(n) => {
unsafe{self.dst.advance_mut(n)};
return Ok(Some(self.dst.take().freeze()))
match decoder.read(unsafe { self.dst.bytes_mut() }) {
Ok(n) => {
unsafe { self.dst.advance_mut(n) };
return Ok(Some(self.dst.take().freeze()));
}
Err(e) =>
return Err(e),
Err(e) => return Err(e),
}
} else {
Ok(None)
}
},
Decoder::Deflate(ref mut decoder) => {
match decoder.try_finish() {
Ok(_) => {
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
},
Err(e) => Err(e),
}
Decoder::Deflate(ref mut decoder) => match decoder.try_finish() {
Ok(_) => {
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
Decoder::Identity => Ok(None),
}
@ -282,66 +284,67 @@ impl PayloadStream {
pub fn feed_data(&mut self, data: Bytes) -> io::Result<Option<Bytes>> {
match self.decoder {
#[cfg(feature="brotli")]
Decoder::Br(ref mut decoder) => {
match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
},
Err(e) => Err(e)
#[cfg(feature = "brotli")]
Decoder::Br(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
Decoder::Gzip(ref mut decoder) => {
if decoder.is_none() {
*decoder = Some(
Box::new(GzDecoder::new(
Wrapper{buf: BytesMut::from(data), eof: false})));
*decoder = Some(Box::new(GzDecoder::new(Wrapper {
buf: BytesMut::from(data),
eof: false,
})));
} else {
let _ = decoder.as_mut().unwrap().write(&data);
}
loop {
self.dst.reserve(8192);
match decoder.as_mut()
.as_mut().unwrap().read(unsafe{self.dst.bytes_mut()})
match decoder
.as_mut()
.as_mut()
.unwrap()
.read(unsafe { self.dst.bytes_mut() })
{
Ok(n) => {
Ok(n) => {
if n != 0 {
unsafe{self.dst.advance_mut(n)};
unsafe { self.dst.advance_mut(n) };
}
if n == 0 {
return Ok(Some(self.dst.take().freeze()));
}
}
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock && !self.dst.is_empty()
if e.kind() == io::ErrorKind::WouldBlock
&& !self.dst.is_empty()
{
return Ok(Some(self.dst.take().freeze()));
}
return Err(e)
return Err(e);
}
}
}
},
Decoder::Deflate(ref mut decoder) => {
match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
},
Err(e) => Err(e),
}
Decoder::Deflate(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
Decoder::Identity => Ok(Some(data)),
}
@ -351,33 +354,33 @@ impl PayloadStream {
pub(crate) enum ContentEncoder {
Deflate(DeflateEncoder<TransferEncoding>),
Gzip(GzEncoder<TransferEncoding>),
#[cfg(feature="brotli")]
#[cfg(feature = "brotli")]
Br(BrotliEncoder<TransferEncoding>),
Identity(TransferEncoding),
}
impl ContentEncoder {
pub fn empty(bytes: SharedBytes) -> ContentEncoder {
ContentEncoder::Identity(TransferEncoding::eof(bytes))
}
pub fn for_server(buf: SharedBytes,
req: &HttpInnerMessage,
resp: &mut HttpResponse,
response_encoding: ContentEncoding) -> ContentEncoder
{
pub fn for_server(
buf: SharedBytes, req: &HttpInnerMessage, resp: &mut HttpResponse,
response_encoding: ContentEncoding,
) -> ContentEncoder {
let version = resp.version().unwrap_or_else(|| req.version);
let is_head = req.method == Method::HEAD;
let mut body = resp.replace_body(Body::Empty);
let has_body = match body {
Body::Empty => false,
Body::Binary(ref bin) =>
!(response_encoding == ContentEncoding::Auto && bin.len() < 96),
Body::Binary(ref bin) => {
!(response_encoding == ContentEncoding::Auto && bin.len() < 96)
}
_ => true,
};
// Enable content encoding only if response does not contain Content-Encoding header
// Enable content encoding only if response does not contain Content-Encoding
// header
let mut encoding = if has_body {
let encoding = match response_encoding {
ContentEncoding::Auto => {
@ -396,7 +399,9 @@ impl ContentEncoder {
};
if encoding.is_compression() {
resp.headers_mut().insert(
CONTENT_ENCODING, HeaderValue::from_static(encoding.as_str()));
CONTENT_ENCODING,
HeaderValue::from_static(encoding.as_str()),
);
}
encoding
} else {
@ -409,23 +414,27 @@ impl ContentEncoder {
resp.headers_mut().remove(CONTENT_LENGTH);
}
TransferEncoding::length(0, buf)
},
}
Body::Binary(ref mut bytes) => {
if !(encoding == ContentEncoding::Identity
|| encoding == ContentEncoding::Auto)
|| encoding == ContentEncoding::Auto)
{
let tmp = SharedBytes::default();
let transfer = TransferEncoding::eof(tmp.clone());
let mut enc = match encoding {
ContentEncoding::Deflate => ContentEncoder::Deflate(
DeflateEncoder::new(transfer, Compression::fast())),
ContentEncoding::Gzip => ContentEncoder::Gzip(
GzEncoder::new(transfer, Compression::fast())),
#[cfg(feature="brotli")]
ContentEncoding::Br => ContentEncoder::Br(
BrotliEncoder::new(transfer, 3)),
DeflateEncoder::new(transfer, Compression::fast()),
),
ContentEncoding::Gzip => ContentEncoder::Gzip(GzEncoder::new(
transfer,
Compression::fast(),
)),
#[cfg(feature = "brotli")]
ContentEncoding::Br => {
ContentEncoder::Br(BrotliEncoder::new(transfer, 3))
}
ContentEncoding::Identity => ContentEncoder::Identity(transfer),
ContentEncoding::Auto => unreachable!()
ContentEncoding::Auto => unreachable!(),
};
// TODO return error!
let _ = enc.write(bytes.clone());
@ -438,7 +447,9 @@ impl ContentEncoder {
let mut b = BytesMut::new();
let _ = write!(b, "{}", bytes.len());
resp.headers_mut().insert(
CONTENT_LENGTH, HeaderValue::try_from(b.freeze()).unwrap());
CONTENT_LENGTH,
HeaderValue::try_from(b.freeze()).unwrap(),
);
} else {
// resp.headers_mut().remove(CONTENT_LENGTH);
}
@ -449,8 +460,8 @@ impl ContentEncoder {
if version == Version::HTTP_2 {
error!("Connection upgrade is forbidden for HTTP/2");
} else {
resp.headers_mut().insert(
CONNECTION, HeaderValue::from_static("upgrade"));
resp.headers_mut()
.insert(CONNECTION, HeaderValue::from_static("upgrade"));
}
if encoding != ContentEncoding::Identity {
encoding = ContentEncoding::Identity;
@ -470,20 +481,24 @@ impl ContentEncoder {
}
match encoding {
ContentEncoding::Deflate => ContentEncoder::Deflate(
DeflateEncoder::new(transfer, Compression::fast())),
ContentEncoding::Gzip => ContentEncoder::Gzip(
GzEncoder::new(transfer, Compression::fast())),
#[cfg(feature="brotli")]
ContentEncoding::Br => ContentEncoder::Br(
BrotliEncoder::new(transfer, 3)),
ContentEncoding::Identity | ContentEncoding::Auto =>
ContentEncoder::Identity(transfer),
ContentEncoding::Deflate => ContentEncoder::Deflate(DeflateEncoder::new(
transfer,
Compression::fast(),
)),
ContentEncoding::Gzip => {
ContentEncoder::Gzip(GzEncoder::new(transfer, Compression::fast()))
}
#[cfg(feature = "brotli")]
ContentEncoding::Br => ContentEncoder::Br(BrotliEncoder::new(transfer, 3)),
ContentEncoding::Identity | ContentEncoding::Auto => {
ContentEncoder::Identity(transfer)
}
}
}
fn streaming_encoding(buf: SharedBytes, version: Version,
resp: &mut HttpResponse) -> TransferEncoding {
fn streaming_encoding(
buf: SharedBytes, version: Version, resp: &mut HttpResponse
) -> TransferEncoding {
match resp.chunked() {
Some(true) => {
// Enable transfer encoding
@ -492,13 +507,12 @@ impl ContentEncoder {
resp.headers_mut().remove(TRANSFER_ENCODING);
TransferEncoding::eof(buf)
} else {
resp.headers_mut().insert(
TRANSFER_ENCODING, HeaderValue::from_static("chunked"));
resp.headers_mut()
.insert(TRANSFER_ENCODING, HeaderValue::from_static("chunked"));
TransferEncoding::chunked(buf)
}
},
Some(false) =>
TransferEncoding::eof(buf),
}
Some(false) => TransferEncoding::eof(buf),
None => {
// if Content-Length is specified, then use it as length hint
let (len, chunked) =
@ -530,9 +544,11 @@ impl ContentEncoder {
match version {
Version::HTTP_11 => {
resp.headers_mut().insert(
TRANSFER_ENCODING, HeaderValue::from_static("chunked"));
TRANSFER_ENCODING,
HeaderValue::from_static("chunked"),
);
TransferEncoding::chunked(buf)
},
}
_ => {
resp.headers_mut().remove(TRANSFER_ENCODING);
TransferEncoding::eof(buf)
@ -545,11 +561,10 @@ impl ContentEncoder {
}
impl ContentEncoder {
#[inline]
pub fn is_eof(&self) -> bool {
match *self {
#[cfg(feature="brotli")]
#[cfg(feature = "brotli")]
ContentEncoder::Br(ref encoder) => encoder.get_ref().is_eof(),
ContentEncoder::Deflate(ref encoder) => encoder.get_ref().is_eof(),
ContentEncoder::Gzip(ref encoder) => encoder.get_ref().is_eof(),
@ -561,39 +576,35 @@ impl ContentEncoder {
#[inline(always)]
pub fn write_eof(&mut self) -> Result<(), io::Error> {
let encoder = mem::replace(
self, ContentEncoder::Identity(TransferEncoding::eof(SharedBytes::empty())));
self,
ContentEncoder::Identity(TransferEncoding::eof(SharedBytes::empty())),
);
match encoder {
#[cfg(feature="brotli")]
ContentEncoder::Br(encoder) => {
match encoder.finish() {
Ok(mut writer) => {
writer.encode_eof();
*self = ContentEncoder::Identity(writer);
Ok(())
},
Err(err) => Err(err),
}
}
ContentEncoder::Gzip(encoder) => {
match encoder.finish() {
Ok(mut writer) => {
writer.encode_eof();
*self = ContentEncoder::Identity(writer);
Ok(())
},
Err(err) => Err(err),
#[cfg(feature = "brotli")]
ContentEncoder::Br(encoder) => match encoder.finish() {
Ok(mut writer) => {
writer.encode_eof();
*self = ContentEncoder::Identity(writer);
Ok(())
}
Err(err) => Err(err),
},
ContentEncoder::Deflate(encoder) => {
match encoder.finish() {
Ok(mut writer) => {
writer.encode_eof();
*self = ContentEncoder::Identity(writer);
Ok(())
},
Err(err) => Err(err),
ContentEncoder::Gzip(encoder) => match encoder.finish() {
Ok(mut writer) => {
writer.encode_eof();
*self = ContentEncoder::Identity(writer);
Ok(())
}
Err(err) => Err(err),
},
ContentEncoder::Deflate(encoder) => match encoder.finish() {
Ok(mut writer) => {
writer.encode_eof();
*self = ContentEncoder::Identity(writer);
Ok(())
}
Err(err) => Err(err),
},
ContentEncoder::Identity(mut writer) => {
writer.encode_eof();
@ -607,23 +618,23 @@ impl ContentEncoder {
#[inline(always)]
pub fn write(&mut self, data: Binary) -> Result<(), io::Error> {
match *self {
#[cfg(feature="brotli")]
#[cfg(feature = "brotli")]
ContentEncoder::Br(ref mut encoder) => {
match encoder.write_all(data.as_ref()) {
Ok(_) => Ok(()),
Err(err) => {
trace!("Error decoding br encoding: {}", err);
Err(err)
},
}
}
},
}
ContentEncoder::Gzip(ref mut encoder) => {
match encoder.write_all(data.as_ref()) {
Ok(_) => Ok(()),
Err(err) => {
trace!("Error decoding gzip encoding: {}", err);
Err(err)
},
}
}
}
ContentEncoder::Deflate(ref mut encoder) => {
@ -632,7 +643,7 @@ impl ContentEncoder {
Err(err) => {
trace!("Error decoding deflate encoding: {}", err);
Err(err)
},
}
}
}
ContentEncoder::Identity(ref mut encoder) => {
@ -665,7 +676,6 @@ enum TransferEncodingKind {
}
impl TransferEncoding {
#[inline]
pub fn eof(bytes: SharedBytes) -> TransferEncoding {
TransferEncoding {
@ -707,7 +717,7 @@ impl TransferEncoding {
let eof = msg.is_empty();
self.buffer.extend(msg);
Ok(eof)
},
}
TransferEncodingKind::Chunked(ref mut eof) => {
if *eof {
return Ok(true);
@ -726,21 +736,22 @@ impl TransferEncoding {
self.buffer.extend_from_slice(b"\r\n");
}
Ok(*eof)
},
}
TransferEncodingKind::Length(ref mut remaining) => {
if *remaining > 0 {
if msg.is_empty() {
return Ok(*remaining == 0)
return Ok(*remaining == 0);
}
let len = cmp::min(*remaining, msg.len() as u64);
self.buffer.extend(msg.take().split_to(len as usize).into());
self.buffer
.extend(msg.take().split_to(len as usize).into());
*remaining -= len as u64;
Ok(*remaining == 0)
} else {
Ok(true)
}
},
}
}
}
@ -754,13 +765,12 @@ impl TransferEncoding {
*eof = true;
self.buffer.extend_from_slice(b"0\r\n\r\n");
}
},
}
}
}
}
impl io::Write for TransferEncoding {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.encode(Binary::from_slice(buf))?;
@ -773,7 +783,6 @@ impl io::Write for TransferEncoding {
}
}
struct AcceptEncoding {
encoding: ContentEncoding,
quality: f64,
@ -817,27 +826,31 @@ impl AcceptEncoding {
_ => match f64::from_str(parts[1]) {
Ok(q) => q,
Err(_) => 0.0,
}
},
};
Some(AcceptEncoding{ encoding, quality })
Some(AcceptEncoding {
encoding,
quality,
})
}
/// Parse a raw Accept-Encoding header value into an ordered list.
pub fn parse(raw: &str) -> ContentEncoding {
let mut encodings: Vec<_> =
raw.replace(' ', "").split(',').map(|l| AcceptEncoding::new(l)).collect();
let mut encodings: Vec<_> = raw.replace(' ', "")
.split(',')
.map(|l| AcceptEncoding::new(l))
.collect();
encodings.sort();
for enc in encodings {
if let Some(enc) = enc {
return enc.encoding
return enc.encoding;
}
}
ContentEncoding::Identity
}
}
#[cfg(test)]
mod tests {
use super::*;
@ -846,9 +859,13 @@ mod tests {
fn test_chunked_te() {
let bytes = SharedBytes::default();
let mut enc = TransferEncoding::chunked(bytes.clone());
assert!(!enc.encode(Binary::from(b"test".as_ref())).ok().unwrap());
assert!(!enc.encode(Binary::from(b"test".as_ref()))
.ok()
.unwrap());
assert!(enc.encode(Binary::from(b"".as_ref())).ok().unwrap());
assert_eq!(bytes.get_mut().take().freeze(),
Bytes::from_static(b"4\r\ntest\r\n0\r\n\r\n"));
assert_eq!(
bytes.get_mut().take().freeze(),
Bytes::from_static(b"4\r\ntest\r\n0\r\n\r\n")
);
}
}

File diff suppressed because it is too large

View File

@ -1,22 +1,22 @@
#![cfg_attr(feature = "cargo-clippy", allow(redundant_field_names))]
use std::{io, mem};
use std::rc::Rc;
use bytes::BufMut;
use futures::{Async, Poll};
use tokio_io::AsyncWrite;
use http::{Method, Version};
use http::header::{HeaderValue, CONNECTION, CONTENT_LENGTH, DATE};
use http::{Method, Version};
use std::rc::Rc;
use std::{io, mem};
use tokio_io::AsyncWrite;
use body::{Body, Binary};
use super::encoding::ContentEncoder;
use super::helpers;
use super::settings::WorkerSettings;
use super::shared::SharedBytes;
use super::{Writer, WriterState, MAX_WRITE_BUFFER_SIZE};
use body::{Binary, Body};
use header::ContentEncoding;
use httprequest::HttpInnerMessage;
use httpresponse::HttpResponse;
use super::helpers;
use super::{Writer, WriterState, MAX_WRITE_BUFFER_SIZE};
use super::shared::SharedBytes;
use super::encoding::ContentEncoder;
use super::settings::WorkerSettings;
const AVERAGE_HEADER_SIZE: usize = 30; // totally scientific
@ -41,10 +41,9 @@ pub(crate) struct H1Writer<T: AsyncWrite, H: 'static> {
}
impl<T: AsyncWrite, H: 'static> H1Writer<T, H> {
pub fn new(stream: T, buf: SharedBytes, settings: Rc<WorkerSettings<H>>)
-> H1Writer<T, H>
{
pub fn new(
stream: T, buf: SharedBytes, settings: Rc<WorkerSettings<H>>
) -> H1Writer<T, H> {
H1Writer {
flags: Flags::empty(),
encoder: ContentEncoder::empty(buf.clone()),
@ -80,11 +79,11 @@ impl<T: AsyncWrite, H: 'static> H1Writer<T, H> {
match self.stream.write(&data[written..]) {
Ok(0) => {
self.disconnected();
return Err(io::Error::new(io::ErrorKind::WriteZero, ""))
},
return Err(io::Error::new(io::ErrorKind::WriteZero, ""));
}
Ok(n) => {
written += n;
},
}
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
return Ok(written)
}
@ -96,19 +95,18 @@ impl<T: AsyncWrite, H: 'static> H1Writer<T, H> {
}
impl<T: AsyncWrite, H: 'static> Writer for H1Writer<T, H> {
#[inline]
fn written(&self) -> u64 {
self.written
}
fn start(&mut self,
req: &mut HttpInnerMessage,
msg: &mut HttpResponse,
encoding: ContentEncoding) -> io::Result<WriterState>
{
fn start(
&mut self, req: &mut HttpInnerMessage, msg: &mut HttpResponse,
encoding: ContentEncoding,
) -> io::Result<WriterState> {
// prepare task
self.encoder = ContentEncoder::for_server(self.buffer.clone(), req, msg, encoding);
self.encoder =
ContentEncoder::for_server(self.buffer.clone(), req, msg, encoding);
if msg.keep_alive().unwrap_or_else(|| req.keep_alive()) {
self.flags.insert(Flags::STARTED | Flags::KEEPALIVE);
} else {
@ -119,15 +117,18 @@ impl<T: AsyncWrite, H: 'static> Writer for H1Writer<T, H> {
let version = msg.version().unwrap_or_else(|| req.version);
if msg.upgrade() {
self.flags.insert(Flags::UPGRADE);
msg.headers_mut().insert(CONNECTION, HeaderValue::from_static("upgrade"));
msg.headers_mut()
.insert(CONNECTION, HeaderValue::from_static("upgrade"));
}
// keep-alive
else if self.flags.contains(Flags::KEEPALIVE) {
if version < Version::HTTP_11 {
msg.headers_mut().insert(CONNECTION, HeaderValue::from_static("keep-alive"));
msg.headers_mut()
.insert(CONNECTION, HeaderValue::from_static("keep-alive"));
}
} else if version >= Version::HTTP_11 {
msg.headers_mut().insert(CONNECTION, HeaderValue::from_static("close"));
msg.headers_mut()
.insert(CONNECTION, HeaderValue::from_static("close"));
}
let body = msg.replace_body(Body::Empty);
@ -137,12 +138,14 @@ impl<T: AsyncWrite, H: 'static> Writer for H1Writer<T, H> {
let reason = msg.reason().as_bytes();
let mut is_bin = if let Body::Binary(ref bytes) = body {
buffer.reserve(
256 + msg.headers().len() * AVERAGE_HEADER_SIZE
+ bytes.len() + reason.len());
256 + msg.headers().len() * AVERAGE_HEADER_SIZE + bytes.len()
+ reason.len(),
);
true
} else {
buffer.reserve(
256 + msg.headers().len() * AVERAGE_HEADER_SIZE + reason.len());
256 + msg.headers().len() * AVERAGE_HEADER_SIZE + reason.len(),
);
false
};
@ -151,51 +154,50 @@ impl<T: AsyncWrite, H: 'static> Writer for H1Writer<T, H> {
SharedBytes::extend_from_slice_(buffer, reason);
match body {
Body::Empty =>
if req.method != Method::HEAD {
SharedBytes::put_slice(buffer, b"\r\ncontent-length: 0\r\n");
} else {
SharedBytes::put_slice(buffer, b"\r\n");
},
Body::Binary(ref bytes) =>
helpers::write_content_length(bytes.len(), &mut buffer),
_ =>
SharedBytes::put_slice(buffer, b"\r\n"),
Body::Empty => if req.method != Method::HEAD {
SharedBytes::put_slice(buffer, b"\r\ncontent-length: 0\r\n");
} else {
SharedBytes::put_slice(buffer, b"\r\n");
},
Body::Binary(ref bytes) => {
helpers::write_content_length(bytes.len(), &mut buffer)
}
_ => SharedBytes::put_slice(buffer, b"\r\n"),
}
// write headers
let mut pos = 0;
let mut has_date = false;
let mut remaining = buffer.remaining_mut();
let mut buf: &mut [u8] = unsafe{ mem::transmute(buffer.bytes_mut()) };
let mut buf: &mut [u8] = unsafe { mem::transmute(buffer.bytes_mut()) };
for (key, value) in msg.headers() {
if is_bin && key == CONTENT_LENGTH {
is_bin = false;
continue
continue;
}
has_date = has_date || key == DATE;
let v = value.as_ref();
let k = key.as_str().as_bytes();
let len = k.len() + v.len() + 4;
if len > remaining {
unsafe{buffer.advance_mut(pos)};
unsafe { buffer.advance_mut(pos) };
pos = 0;
buffer.reserve(len);
remaining = buffer.remaining_mut();
buf = unsafe{ mem::transmute(buffer.bytes_mut()) };
buf = unsafe { mem::transmute(buffer.bytes_mut()) };
}
buf[pos..pos+k.len()].copy_from_slice(k);
buf[pos..pos + k.len()].copy_from_slice(k);
pos += k.len();
buf[pos..pos+2].copy_from_slice(b": ");
buf[pos..pos + 2].copy_from_slice(b": ");
pos += 2;
buf[pos..pos+v.len()].copy_from_slice(v);
buf[pos..pos + v.len()].copy_from_slice(v);
pos += v.len();
buf[pos..pos+2].copy_from_slice(b"\r\n");
buf[pos..pos + 2].copy_from_slice(b"\r\n");
pos += 2;
remaining -= len;
}
unsafe{buffer.advance_mut(pos)};
unsafe { buffer.advance_mut(pos) };
// optimized date header, set_date writes \r\n
if !has_date {
@ -256,8 +258,10 @@ impl<T: AsyncWrite, H: 'static> Writer for H1Writer<T, H> {
self.encoder.write_eof()?;
if !self.encoder.is_eof() {
Err(io::Error::new(io::ErrorKind::Other,
"Last payload item, but eof is not reached"))
Err(io::Error::new(
io::ErrorKind::Other,
"Last payload item, but eof is not reached",
))
} else if self.buffer.len() > MAX_WRITE_BUFFER_SIZE {
Ok(WriterState::Pause)
} else {
@ -268,11 +272,11 @@ impl<T: AsyncWrite, H: 'static> Writer for H1Writer<T, H> {
#[inline]
fn poll_completed(&mut self, shutdown: bool) -> Poll<(), io::Error> {
if !self.buffer.is_empty() {
let buf: &[u8] = unsafe{mem::transmute(self.buffer.as_ref())};
let buf: &[u8] = unsafe { mem::transmute(self.buffer.as_ref()) };
let written = self.write_data(buf)?;
let _ = self.buffer.split_to(written);
if self.buffer.len() > self.buffer_capacity {
return Ok(Async::NotReady)
return Ok(Async::NotReady);
}
}
if shutdown {

View File

@ -1,30 +1,30 @@
#![cfg_attr(feature = "cargo-clippy", allow(redundant_field_names))]
use std::{io, cmp, mem};
use std::rc::Rc;
use std::io::{Read, Write};
use std::time::Duration;
use std::net::SocketAddr;
use std::collections::VecDeque;
use std::io::{Read, Write};
use std::net::SocketAddr;
use std::rc::Rc;
use std::time::Duration;
use std::{cmp, io, mem};
use actix::Arbiter;
use modhttp::request::Parts;
use http2::{Reason, RecvStream};
use http2::server::{self, Connection, Handshake, SendResponse};
use bytes::{Buf, Bytes};
use futures::{Async, Poll, Future, Stream};
use tokio_io::{AsyncRead, AsyncWrite};
use futures::{Async, Future, Poll, Stream};
use http2::server::{self, Connection, Handshake, SendResponse};
use http2::{Reason, RecvStream};
use modhttp::request::Parts;
use tokio_core::reactor::Timeout;
use tokio_io::{AsyncRead, AsyncWrite};
use pipeline::Pipeline;
use error::PayloadError;
use httpmessage::HttpMessage;
use httprequest::HttpRequest;
use httpresponse::HttpResponse;
use payload::{Payload, PayloadWriter, PayloadStatus};
use payload::{Payload, PayloadStatus, PayloadWriter};
use pipeline::Pipeline;
use super::h2writer::H2Writer;
use super::encoding::PayloadType;
use super::h2writer::H2Writer;
use super::settings::WorkerSettings;
use super::{HttpHandler, HttpHandlerTask, Writer};
@ -35,9 +35,10 @@ bitflags! {
}
/// HTTP/2 Transport
pub(crate)
struct Http2<T, H>
where T: AsyncRead + AsyncWrite + 'static, H: 'static
pub(crate) struct Http2<T, H>
where
T: AsyncRead + AsyncWrite + 'static,
H: 'static,
{
flags: Flags,
settings: Rc<WorkerSettings<H>>,
@ -54,20 +55,23 @@ enum State<T: AsyncRead + AsyncWrite> {
}
impl<T, H> Http2<T, H>
where T: AsyncRead + AsyncWrite + 'static,
H: HttpHandler + 'static
where
T: AsyncRead + AsyncWrite + 'static,
H: HttpHandler + 'static,
{
pub fn new(settings: Rc<WorkerSettings<H>>,
io: T,
addr: Option<SocketAddr>, buf: Bytes) -> Self
{
Http2{ flags: Flags::empty(),
tasks: VecDeque::new(),
state: State::Handshake(
server::handshake(IoWrapper{unread: Some(buf), inner: io})),
keepalive_timer: None,
addr,
settings,
pub fn new(
settings: Rc<WorkerSettings<H>>, io: T, addr: Option<SocketAddr>, buf: Bytes
) -> Self {
Http2 {
flags: Flags::empty(),
tasks: VecDeque::new(),
state: State::Handshake(server::handshake(IoWrapper {
unread: Some(buf),
inner: io,
})),
keepalive_timer: None,
addr,
settings,
}
}
@ -89,7 +93,7 @@ impl<T, H> Http2<T, H>
match timeout.poll() {
Ok(Async::Ready(_)) => {
trace!("Keep-alive timeout, close connection");
return Ok(Async::Ready(()))
return Ok(Async::Ready(()));
}
Ok(Async::NotReady) => (),
Err(_) => unreachable!(),
@ -111,29 +115,30 @@ impl<T, H> Http2<T, H>
Ok(Async::Ready(ready)) => {
if ready {
item.flags.insert(
EntryFlags::EOF | EntryFlags::FINISHED);
EntryFlags::EOF | EntryFlags::FINISHED,
);
} else {
item.flags.insert(EntryFlags::EOF);
}
not_ready = false;
},
}
Ok(Async::NotReady) => {
if item.payload.need_read() == PayloadStatus::Read
&& !retry
{
continue
continue;
}
},
}
Err(err) => {
error!("Unhandled error: {}", err);
item.flags.insert(
EntryFlags::EOF |
EntryFlags::ERROR |
EntryFlags::WRITE_DONE);
EntryFlags::EOF | EntryFlags::ERROR
| EntryFlags::WRITE_DONE,
);
item.stream.reset(Reason::INTERNAL_ERROR);
}
}
break
break;
}
} else if !item.flags.contains(EntryFlags::FINISHED) {
match item.task.poll() {
@ -141,11 +146,12 @@ impl<T, H> Http2<T, H>
Ok(Async::Ready(_)) => {
not_ready = false;
item.flags.insert(EntryFlags::FINISHED);
},
}
Err(err) => {
item.flags.insert(
EntryFlags::ERROR | EntryFlags::WRITE_DONE |
EntryFlags::FINISHED);
EntryFlags::ERROR | EntryFlags::WRITE_DONE
| EntryFlags::FINISHED,
);
error!("Unhandled error: {}", err);
}
}
@ -167,13 +173,13 @@ impl<T, H> Http2<T, H>
// cleanup finished tasks
while !self.tasks.is_empty() {
if self.tasks[0].flags.contains(EntryFlags::EOF) &&
self.tasks[0].flags.contains(EntryFlags::WRITE_DONE) ||
self.tasks[0].flags.contains(EntryFlags::ERROR)
if self.tasks[0].flags.contains(EntryFlags::EOF)
&& self.tasks[0].flags.contains(EntryFlags::WRITE_DONE)
|| self.tasks[0].flags.contains(EntryFlags::ERROR)
{
self.tasks.pop_front();
} else {
break
break;
}
}
@ -186,7 +192,7 @@ impl<T, H> Http2<T, H>
for entry in &mut self.tasks {
entry.task.disconnected()
}
},
}
Ok(Async::Ready(Some((req, resp)))) => {
not_ready = false;
let (parts, body) = req.into_parts();
@ -194,8 +200,13 @@ impl<T, H> Http2<T, H>
// stop keepalive timer
self.keepalive_timer.take();
self.tasks.push_back(
Entry::new(parts, body, resp, self.addr, &self.settings));
self.tasks.push_back(Entry::new(
parts,
body,
resp,
self.addr,
&self.settings,
));
}
Ok(Async::NotReady) => {
// start keep-alive timer
@ -213,12 +224,13 @@ impl<T, H> Http2<T, H>
}
} else {
// keep-alive disable, drop connection
return conn.poll_close().map_err(
|e| error!("Error during connection close: {}", e))
return conn.poll_close().map_err(|e| {
error!("Error during connection close: {}", e)
});
}
} else {
// keep-alive unset, rely on operating system
return Ok(Async::NotReady)
return Ok(Async::NotReady);
}
}
Err(err) => {
@ -228,16 +240,17 @@ impl<T, H> Http2<T, H>
entry.task.disconnected()
}
self.keepalive_timer.take();
},
}
}
}
if not_ready {
if self.tasks.is_empty() && self.flags.contains(Flags::DISCONNECTED) {
return conn.poll_close().map_err(
|e| error!("Error during connection close: {}", e))
if self.tasks.is_empty() && self.flags.contains(Flags::DISCONNECTED)
{
return conn.poll_close()
.map_err(|e| error!("Error during connection close: {}", e));
} else {
return Ok(Async::NotReady)
return Ok(Async::NotReady);
}
}
}
@ -246,14 +259,11 @@ impl<T, H> Http2<T, H>
// handshake
self.state = if let State::Handshake(ref mut handshake) = self.state {
match handshake.poll() {
Ok(Async::Ready(conn)) => {
State::Connection(conn)
},
Ok(Async::NotReady) =>
return Ok(Async::NotReady),
Ok(Async::Ready(conn)) => State::Connection(conn),
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(err) => {
trace!("Error handling connection: {}", err);
return Err(())
return Err(());
}
}
} else {
@ -283,12 +293,12 @@ struct Entry<H: 'static> {
}
impl<H: 'static> Entry<H> {
fn new(parts: Parts,
recv: RecvStream,
resp: SendResponse<Bytes>,
addr: Option<SocketAddr>,
settings: &Rc<WorkerSettings<H>>) -> Entry<H>
where H: HttpHandler + 'static
fn new(
parts: Parts, recv: RecvStream, resp: SendResponse<Bytes>,
addr: Option<SocketAddr>, settings: &Rc<WorkerSettings<H>>,
) -> Entry<H>
where
H: HttpHandler + 'static,
{
// Payload and Content-Encoding
let (psender, payload) = Payload::new(false);
@ -312,18 +322,22 @@ impl<H: 'static> Entry<H> {
req = match h.handle(req) {
Ok(t) => {
task = Some(t);
break
},
break;
}
Err(req) => req,
}
}
Entry {task: task.unwrap_or_else(|| Pipeline::error(HttpResponse::NotFound())),
payload: psender,
stream: H2Writer::new(
resp, settings.get_shared_bytes(), Rc::clone(settings)),
flags: EntryFlags::empty(),
recv,
Entry {
task: task.unwrap_or_else(|| Pipeline::error(HttpResponse::NotFound())),
payload: psender,
stream: H2Writer::new(
resp,
settings.get_shared_bytes(),
Rc::clone(settings),
),
flags: EntryFlags::empty(),
recv,
}
}
@ -340,14 +354,12 @@ impl<H: 'static> Entry<H> {
match self.recv.poll() {
Ok(Async::Ready(Some(chunk))) => {
self.payload.feed_data(chunk);
},
}
Ok(Async::Ready(None)) => {
self.flags.insert(EntryFlags::REOF);
},
Ok(Async::NotReady) => (),
Err(err) => {
self.payload.set_error(PayloadError::Http2(err))
}
Ok(Async::NotReady) => (),
Err(err) => self.payload.set_error(PayloadError::Http2(err)),
}
}
}

View File

@ -1,25 +1,25 @@
#![cfg_attr(feature = "cargo-clippy", allow(redundant_field_names))]
use std::{io, cmp};
use std::rc::Rc;
use bytes::{Bytes, BytesMut};
use futures::{Async, Poll};
use http2::{Reason, SendStream};
use http2::server::SendResponse;
use http2::{Reason, SendStream};
use modhttp::Response;
use std::rc::Rc;
use std::{cmp, io};
use http::{Version, HttpTryFrom};
use http::header::{HeaderValue, CONNECTION, TRANSFER_ENCODING, DATE, CONTENT_LENGTH};
use http::header::{HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING};
use http::{HttpTryFrom, Version};
use body::{Body, Binary};
use super::encoding::ContentEncoder;
use super::helpers;
use super::settings::WorkerSettings;
use super::shared::SharedBytes;
use super::{Writer, WriterState, MAX_WRITE_BUFFER_SIZE};
use body::{Binary, Body};
use header::ContentEncoding;
use httprequest::HttpInnerMessage;
use httpresponse::HttpResponse;
use super::helpers;
use super::encoding::ContentEncoder;
use super::shared::SharedBytes;
use super::settings::WorkerSettings;
use super::{Writer, WriterState, MAX_WRITE_BUFFER_SIZE};
const CHUNK_SIZE: usize = 16_384;
@ -44,10 +44,9 @@ pub(crate) struct H2Writer<H: 'static> {
}
impl<H: 'static> H2Writer<H> {
pub fn new(respond: SendResponse<Bytes>,
buf: SharedBytes, settings: Rc<WorkerSettings<H>>) -> H2Writer<H>
{
pub fn new(
respond: SendResponse<Bytes>, buf: SharedBytes, settings: Rc<WorkerSettings<H>>
) -> H2Writer<H> {
H2Writer {
respond,
settings,
@ -68,19 +67,18 @@ impl<H: 'static> H2Writer<H> {
}
impl<H: 'static> Writer for H2Writer<H> {
fn written(&self) -> u64 {
self.written
}
fn start(&mut self,
req: &mut HttpInnerMessage,
msg: &mut HttpResponse,
encoding: ContentEncoding) -> io::Result<WriterState>
{
fn start(
&mut self, req: &mut HttpInnerMessage, msg: &mut HttpResponse,
encoding: ContentEncoding,
) -> io::Result<WriterState> {
// prepare response
self.flags.insert(Flags::STARTED);
self.encoder = ContentEncoder::for_server(self.buffer.clone(), req, msg, encoding);
self.encoder =
ContentEncoder::for_server(self.buffer.clone(), req, msg, encoding);
if let Body::Empty = *msg.body() {
self.flags.insert(Flags::EOF);
}
@ -93,7 +91,8 @@ impl<H: 'static> Writer for H2Writer<H> {
if !msg.headers().contains_key(DATE) {
let mut bytes = BytesMut::with_capacity(29);
self.settings.set_date_simple(&mut bytes);
msg.headers_mut().insert(DATE, HeaderValue::try_from(bytes.freeze()).unwrap());
msg.headers_mut()
.insert(DATE, HeaderValue::try_from(bytes.freeze()).unwrap());
}
let body = msg.replace_body(Body::Empty);
@ -104,11 +103,13 @@ impl<H: 'static> Writer for H2Writer<H> {
let l = val.len();
msg.headers_mut().insert(
CONTENT_LENGTH,
HeaderValue::try_from(val.split_to(l-2).freeze()).unwrap());
HeaderValue::try_from(val.split_to(l - 2).freeze()).unwrap(),
);
}
Body::Empty => {
msg.headers_mut().insert(CONTENT_LENGTH, HeaderValue::from_static("0"));
},
msg.headers_mut()
.insert(CONTENT_LENGTH, HeaderValue::from_static("0"));
}
_ => (),
}
@ -119,11 +120,11 @@ impl<H: 'static> Writer for H2Writer<H> {
resp.headers_mut().insert(key, value.clone());
}
match self.respond.send_response(resp, self.flags.contains(Flags::EOF)) {
Ok(stream) =>
self.stream = Some(stream),
Err(_) =>
return Err(io::Error::new(io::ErrorKind::Other, "err")),
match self.respond
.send_response(resp, self.flags.contains(Flags::EOF))
{
Ok(stream) => self.stream = Some(stream),
Err(_) => return Err(io::Error::new(io::ErrorKind::Other, "err")),
}
trace!("Response: {:?}", msg);
@ -169,8 +170,10 @@ impl<H: 'static> Writer for H2Writer<H> {
self.flags.insert(Flags::EOF);
if !self.encoder.is_eof() {
Err(io::Error::new(io::ErrorKind::Other,
"Last payload item, but eof is not reached"))
Err(io::Error::new(
io::ErrorKind::Other,
"Last payload item, but eof is not reached",
))
} else if self.buffer.len() > MAX_WRITE_BUFFER_SIZE {
Ok(WriterState::Pause)
} else {
@ -197,17 +200,18 @@ impl<H: 'static> Writer for H2Writer<H> {
Ok(Async::Ready(Some(cap))) => {
let len = self.buffer.len();
let bytes = self.buffer.split_to(cmp::min(cap, len));
let eof = self.buffer.is_empty() && self.flags.contains(Flags::EOF);
let eof =
self.buffer.is_empty() && self.flags.contains(Flags::EOF);
self.written += bytes.len() as u64;
if let Err(e) = stream.send_data(bytes.freeze(), eof) {
return Err(io::Error::new(io::ErrorKind::Other, e))
return Err(io::Error::new(io::ErrorKind::Other, e));
} else if !self.buffer.is_empty() {
let cap = cmp::min(self.buffer.len(), CHUNK_SIZE);
stream.reserve_capacity(cap);
} else {
self.flags.remove(Flags::RESERVED);
return Ok(Async::NotReady)
return Ok(Async::NotReady);
}
}
Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)),

View File

@ -1,9 +1,9 @@
use std::{mem, ptr, slice};
use std::cell::RefCell;
use std::rc::Rc;
use std::collections::VecDeque;
use bytes::{BufMut, BytesMut};
use http::Version;
use std::cell::RefCell;
use std::collections::VecDeque;
use std::rc::Rc;
use std::{mem, ptr, slice};
use httprequest::HttpInnerMessage;
@ -35,7 +35,9 @@ impl SharedMessagePool {
}
pub(crate) struct SharedHttpInnerMessage(
Option<Rc<HttpInnerMessage>>, Option<Rc<SharedMessagePool>>);
Option<Rc<HttpInnerMessage>>,
Option<Rc<SharedMessagePool>>,
);
impl Drop for SharedHttpInnerMessage {
fn drop(&mut self) {
@ -50,26 +52,25 @@ impl Drop for SharedHttpInnerMessage {
}
impl Clone for SharedHttpInnerMessage {
fn clone(&self) -> SharedHttpInnerMessage {
SharedHttpInnerMessage(self.0.clone(), self.1.clone())
}
}
impl Default for SharedHttpInnerMessage {
fn default() -> SharedHttpInnerMessage {
SharedHttpInnerMessage(Some(Rc::new(HttpInnerMessage::default())), None)
}
}
impl SharedHttpInnerMessage {
pub fn from_message(msg: HttpInnerMessage) -> SharedHttpInnerMessage {
SharedHttpInnerMessage(Some(Rc::new(msg)), None)
}
pub fn new(msg: Rc<HttpInnerMessage>, pool: Rc<SharedMessagePool>) -> SharedHttpInnerMessage {
pub fn new(
msg: Rc<HttpInnerMessage>, pool: Rc<SharedMessagePool>
) -> SharedHttpInnerMessage {
SharedHttpInnerMessage(Some(msg), Some(pool))
}
@ -78,7 +79,7 @@ impl SharedHttpInnerMessage {
#[cfg_attr(feature = "cargo-clippy", allow(mut_from_ref, inline_always))]
pub fn get_mut(&self) -> &mut HttpInnerMessage {
let r: &HttpInnerMessage = self.0.as_ref().unwrap().as_ref();
unsafe{mem::transmute(r)}
unsafe { mem::transmute(r) }
}
#[inline(always)]
@ -88,20 +89,23 @@ impl SharedHttpInnerMessage {
}
}
const DEC_DIGITS_LUT: &[u8] =
b"0001020304050607080910111213141516171819\
const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\
2021222324252627282930313233343536373839\
4041424344454647484950515253545556575859\
6061626364656667686970717273747576777879\
8081828384858687888990919293949596979899";
pub(crate) fn write_status_line(version: Version, mut n: u16, bytes: &mut BytesMut) {
let mut buf: [u8; 13] = [b'H', b'T', b'T', b'P', b'/', b'1', b'.', b'1',
b' ', b' ', b' ', b' ', b' '];
let mut buf: [u8; 13] = [
b'H', b'T', b'T', b'P', b'/', b'1', b'.', b'1', b' ', b' ', b' ', b' ', b' '
];
match version {
Version::HTTP_2 => buf[5] = b'2',
Version::HTTP_10 => buf[7] = b'0',
Version::HTTP_09 => {buf[5] = b'0'; buf[7] = b'9';},
Version::HTTP_09 => {
buf[5] = b'0';
buf[7] = b'9';
}
_ => (),
}
@ -124,7 +128,11 @@ pub(crate) fn write_status_line(version: Version, mut n: u16, bytes: &mut BytesM
} else {
let d1 = n << 1;
curr -= 2;
ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(curr), 2);
ptr::copy_nonoverlapping(
lut_ptr.offset(d1 as isize),
buf_ptr.offset(curr),
2,
);
}
}
@ -137,30 +145,41 @@ pub(crate) fn write_status_line(version: Version, mut n: u16, bytes: &mut BytesM
/// NOTE: bytes object has to contain enough space
pub(crate) fn write_content_length(mut n: usize, bytes: &mut BytesMut) {
if n < 10 {
let mut buf: [u8; 21] = [b'\r',b'\n',b'c',b'o',b'n',b't',b'e',
b'n',b't',b'-',b'l',b'e',b'n',b'g',
b't',b'h',b':',b' ',b'0',b'\r',b'\n'];
let mut buf: [u8; 21] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l', b'e',
b'n', b'g', b't', b'h', b':', b' ', b'0', b'\r', b'\n',
];
buf[18] = (n as u8) + b'0';
bytes.put_slice(&buf);
} else if n < 100 {
let mut buf: [u8; 22] = [b'\r',b'\n',b'c',b'o',b'n',b't',b'e',
b'n',b't',b'-',b'l',b'e',b'n',b'g',
b't',b'h',b':',b' ',b'0',b'0',b'\r',b'\n'];
let mut buf: [u8; 22] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l', b'e',
b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'\r', b'\n',
];
let d1 = n << 1;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().offset(d1 as isize), buf.as_mut_ptr().offset(18), 2);
DEC_DIGITS_LUT.as_ptr().offset(d1 as isize),
buf.as_mut_ptr().offset(18),
2,
);
}
bytes.put_slice(&buf);
} else if n < 1000 {
let mut buf: [u8; 23] = [b'\r',b'\n',b'c',b'o',b'n',b't',b'e',
b'n',b't',b'-',b'l',b'e',b'n',b'g',
b't',b'h',b':',b' ',b'0',b'0',b'0',b'\r',b'\n'];
let mut buf: [u8; 23] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l', b'e',
b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'0', b'\r', b'\n',
];
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
unsafe {ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().offset(d1 as isize), buf.as_mut_ptr().offset(19), 2)};
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().offset(d1 as isize),
buf.as_mut_ptr().offset(19),
2,
)
};
// decode last 1
buf[18] = (n as u8) + b'0';
@ -216,12 +235,13 @@ pub(crate) fn convert_usize(mut n: usize, bytes: &mut BytesMut) {
}
unsafe {
bytes.extend_from_slice(
slice::from_raw_parts(buf_ptr.offset(curr), 41 - curr as usize));
bytes.extend_from_slice(slice::from_raw_parts(
buf_ptr.offset(curr),
41 - curr as usize,
));
}
}
#[cfg(test)]
mod tests {
use super::*;
@ -231,33 +251,63 @@ mod tests {
let mut bytes = BytesMut::new();
bytes.reserve(50);
write_content_length(0, &mut bytes);
assert_eq!(bytes.take().freeze(), b"\r\ncontent-length: 0\r\n"[..]);
assert_eq!(
bytes.take().freeze(),
b"\r\ncontent-length: 0\r\n"[..]
);
bytes.reserve(50);
write_content_length(9, &mut bytes);
assert_eq!(bytes.take().freeze(), b"\r\ncontent-length: 9\r\n"[..]);
assert_eq!(
bytes.take().freeze(),
b"\r\ncontent-length: 9\r\n"[..]
);
bytes.reserve(50);
write_content_length(10, &mut bytes);
assert_eq!(bytes.take().freeze(), b"\r\ncontent-length: 10\r\n"[..]);
assert_eq!(
bytes.take().freeze(),
b"\r\ncontent-length: 10\r\n"[..]
);
bytes.reserve(50);
write_content_length(99, &mut bytes);
assert_eq!(bytes.take().freeze(), b"\r\ncontent-length: 99\r\n"[..]);
assert_eq!(
bytes.take().freeze(),
b"\r\ncontent-length: 99\r\n"[..]
);
bytes.reserve(50);
write_content_length(100, &mut bytes);
assert_eq!(bytes.take().freeze(), b"\r\ncontent-length: 100\r\n"[..]);
assert_eq!(
bytes.take().freeze(),
b"\r\ncontent-length: 100\r\n"[..]
);
bytes.reserve(50);
write_content_length(101, &mut bytes);
assert_eq!(bytes.take().freeze(), b"\r\ncontent-length: 101\r\n"[..]);
assert_eq!(
bytes.take().freeze(),
b"\r\ncontent-length: 101\r\n"[..]
);
bytes.reserve(50);
write_content_length(998, &mut bytes);
assert_eq!(bytes.take().freeze(), b"\r\ncontent-length: 998\r\n"[..]);
assert_eq!(
bytes.take().freeze(),
b"\r\ncontent-length: 998\r\n"[..]
);
bytes.reserve(50);
write_content_length(1000, &mut bytes);
assert_eq!(bytes.take().freeze(), b"\r\ncontent-length: 1000\r\n"[..]);
assert_eq!(
bytes.take().freeze(),
b"\r\ncontent-length: 1000\r\n"[..]
);
bytes.reserve(50);
write_content_length(1001, &mut bytes);
assert_eq!(bytes.take().freeze(), b"\r\ncontent-length: 1001\r\n"[..]);
assert_eq!(
bytes.take().freeze(),
b"\r\ncontent-length: 1001\r\n"[..]
);
bytes.reserve(50);
write_content_length(5909, &mut bytes);
assert_eq!(bytes.take().freeze(), b"\r\ncontent-length: 5909\r\n"[..]);
assert_eq!(
bytes.take().freeze(),
b"\r\ncontent-length: 5909\r\n"[..]
);
}
}

View File

@ -1,27 +1,27 @@
//! Http server
use std::{time, io};
use std::net::Shutdown;
use std::{io, time};
use actix;
use futures::Poll;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_core::net::TcpStream;
use tokio_io::{AsyncRead, AsyncWrite};
mod srv;
mod worker;
mod channel;
pub(crate) mod encoding;
pub(crate) mod h1;
mod h2;
mod h1writer;
mod h2;
mod h2writer;
mod settings;
pub(crate) mod helpers;
mod settings;
pub(crate) mod shared;
mod srv;
pub(crate) mod utils;
mod worker;
pub use self::srv::HttpServer;
pub use self::settings::ServerSettings;
pub use self::srv::HttpServer;
use body::Binary;
use error::Error;
@ -56,9 +56,10 @@ pub(crate) const MAX_WRITE_BUFFER_SIZE: usize = 65_536;
/// }
/// ```
pub fn new<F, U, H>(factory: F) -> HttpServer<H>
where F: Fn() -> U + Sync + Send + 'static,
U: IntoIterator<Item=H> + 'static,
H: IntoHttpHandler + 'static
where
F: Fn() -> U + Sync + Send + 'static,
U: IntoIterator<Item = H> + 'static,
H: IntoHttpHandler + 'static,
{
HttpServer::new(factory)
}
@ -107,7 +108,7 @@ pub struct ResumeServer;
///
/// If server starts with `spawn()` method, then spawned thread get terminated.
pub struct StopServer {
pub graceful: bool
pub graceful: bool,
}
impl actix::Message for StopServer {
@ -117,7 +118,6 @@ impl actix::Message for StopServer {
/// Low level http request handler
#[allow(unused_variables)]
pub trait HttpHandler: 'static {
/// Handle request
fn handle(&mut self, req: HttpRequest) -> Result<Box<HttpHandlerTask>, HttpRequest>;
}
@ -130,7 +130,6 @@ impl HttpHandler for Box<HttpHandler> {
#[doc(hidden)]
pub trait HttpHandlerTask {
/// Poll task, this method is used before or after *io* object is available
fn poll(&mut self) -> Poll<(), Error>;
@ -170,8 +169,10 @@ pub enum WriterState {
pub trait Writer {
fn written(&self) -> u64;
fn start(&mut self, req: &mut HttpInnerMessage, resp: &mut HttpResponse, encoding: ContentEncoding)
-> io::Result<WriterState>;
fn start(
&mut self, req: &mut HttpInnerMessage, resp: &mut HttpResponse,
encoding: ContentEncoding,
) -> io::Result<WriterState>;
fn write(&mut self, payload: Binary) -> io::Result<WriterState>;
@ -207,10 +208,10 @@ impl IoStream for TcpStream {
}
}
#[cfg(feature="alpn")]
#[cfg(feature = "alpn")]
use tokio_openssl::SslStream;
#[cfg(feature="alpn")]
#[cfg(feature = "alpn")]
impl IoStream for SslStream<TcpStream> {
#[inline]
fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> {
@ -229,10 +230,10 @@ impl IoStream for SslStream<TcpStream> {
}
}
#[cfg(feature="tls")]
#[cfg(feature = "tls")]
use tokio_tls::TlsStream;
#[cfg(feature="tls")]
#[cfg(feature = "tls")]
impl IoStream for TlsStream<TcpStream> {
#[inline]
fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> {

View File

@ -1,19 +1,19 @@
use std::{fmt, mem, net};
use bytes::BytesMut;
use futures_cpupool::{Builder, CpuPool};
use http::StatusCode;
use std::cell::{Cell, RefCell, RefMut, UnsafeCell};
use std::fmt::Write;
use std::rc::Rc;
use std::sync::Arc;
use std::cell::{Cell, RefCell, RefMut, UnsafeCell};
use std::{fmt, mem, net};
use time;
use bytes::BytesMut;
use http::StatusCode;
use futures_cpupool::{Builder, CpuPool};
use super::helpers;
use super::KeepAlive;
use super::channel::Node;
use super::helpers;
use super::shared::{SharedBytes, SharedBytesPool};
use body::Body;
use httpresponse::{HttpResponse, HttpResponsePool, HttpResponseBuilder};
use httpresponse::{HttpResponse, HttpResponseBuilder, HttpResponsePool};
/// Various server settings
#[derive(Clone)]
@ -71,9 +71,9 @@ impl Default for ServerSettings {
impl ServerSettings {
/// Crate server settings instance
pub(crate) fn new(addr: Option<net::SocketAddr>, host: &Option<String>, secure: bool)
-> ServerSettings
{
pub(crate) fn new(
addr: Option<net::SocketAddr>, host: &Option<String>, secure: bool
) -> ServerSettings {
let host = if let Some(ref host) = *host {
host.clone()
} else if let Some(ref addr) = addr {
@ -83,7 +83,13 @@ impl ServerSettings {
};
let cpu_pool = Arc::new(InnerCpuPool::new());
let responses = HttpResponsePool::pool();
ServerSettings { addr, secure, host, cpu_pool, responses }
ServerSettings {
addr,
secure,
host,
cpu_pool,
responses,
}
}
/// Returns the socket address of the local half of this TCP connection
@ -112,12 +118,13 @@ impl ServerSettings {
}
#[inline]
pub(crate) fn get_response_builder(&self, status: StatusCode) -> HttpResponseBuilder {
pub(crate) fn get_response_builder(
&self, status: StatusCode
) -> HttpResponseBuilder {
HttpResponsePool::get_builder(&self.responses, status)
}
}
// "Sun, 06 Nov 1994 08:49:37 GMT".len()
const DATE_VALUE_LENGTH: usize = 29;
@ -141,7 +148,8 @@ impl<H> WorkerSettings<H> {
};
WorkerSettings {
keep_alive, ka_enabled,
keep_alive,
ka_enabled,
h: RefCell::new(h),
bytes: Rc::new(SharedBytesPool::new()),
messages: Rc::new(helpers::SharedMessagePool::new()),
@ -176,7 +184,10 @@ impl<H> WorkerSettings<H> {
}
pub fn get_http_message(&self) -> helpers::SharedHttpInnerMessage {
helpers::SharedHttpInnerMessage::new(self.messages.get(), Rc::clone(&self.messages))
helpers::SharedHttpInnerMessage::new(
self.messages.get(),
Rc::clone(&self.messages),
)
}
pub fn add_channel(&self) {
@ -186,26 +197,26 @@ impl<H> WorkerSettings<H> {
pub fn remove_channel(&self) {
let num = self.channels.get();
if num > 0 {
self.channels.set(num-1);
self.channels.set(num - 1);
} else {
error!("Number of removed channels is bigger than added channel. Bug in actix-web");
}
}
pub fn update_date(&self) {
unsafe{&mut *self.date.get()}.update();
unsafe { &mut *self.date.get() }.update();
}
pub fn set_date(&self, dst: &mut BytesMut) {
let mut buf: [u8; 39] = unsafe { mem::uninitialized() };
buf[..6].copy_from_slice(b"date: ");
buf[6..35].copy_from_slice(&(unsafe{&*self.date.get()}.bytes));
buf[6..35].copy_from_slice(&(unsafe { &*self.date.get() }.bytes));
buf[35..].copy_from_slice(b"\r\n\r\n");
dst.extend_from_slice(&buf);
}
pub fn set_date_simple(&self, dst: &mut BytesMut) {
dst.extend_from_slice(&(unsafe{&*self.date.get()}.bytes));
dst.extend_from_slice(&(unsafe { &*self.date.get() }.bytes));
}
}
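For reference, the fixed buffer written by set_date above is laid out as 6 bytes for "date: ", 29 bytes for the cached date string (DATE_VALUE_LENGTH), and 4 bytes for the trailing "\r\n\r\n", i.e. 6 + 29 + 4 = 39. A minimal standalone sketch of the same layout, using a zeroed buffer and a plain Vec<u8> instead of the BytesMut and mem::uninitialized() used above (the function name is illustrative):
// Sketch only: mirrors the 6 + 29 + 4 = 39-byte layout of set_date.
fn write_date_header(dst: &mut Vec<u8>, date: &[u8; 29]) {
    let mut buf = [0u8; 39];
    buf[..6].copy_from_slice(b"date: ");    // header name prefix
    buf[6..35].copy_from_slice(date);       // cached date, DATE_VALUE_LENGTH bytes
    buf[35..].copy_from_slice(b"\r\n\r\n"); // terminating CRLF CRLF
    dst.extend_from_slice(&buf);
}
fn main() {
    let mut dst = Vec::new();
    write_date_header(&mut dst, b"Sun, 06 Nov 1994 08:49:37 GMT");
    assert_eq!(dst.len(), 39);
}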
@ -216,7 +227,10 @@ struct Date {
impl Date {
fn new() -> Date {
let mut date = Date{bytes: [0; DATE_VALUE_LENGTH], pos: 0};
let mut date = Date {
bytes: [0; DATE_VALUE_LENGTH],
pos: 0,
};
date.update();
date
}
@ -235,14 +249,16 @@ impl fmt::Write for Date {
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_date_len() {
assert_eq!(DATE_VALUE_LENGTH, "Sun, 06 Nov 1994 08:49:37 GMT".len());
assert_eq!(
DATE_VALUE_LENGTH,
"Sun, 06 Nov 1994 08:49:37 GMT".len()
);
}
#[test]


@ -1,12 +1,11 @@
use std::{io, mem};
use std::cell::RefCell;
use std::rc::Rc;
use std::collections::VecDeque;
use bytes::{BufMut, BytesMut};
use std::cell::RefCell;
use std::collections::VecDeque;
use std::rc::Rc;
use std::{io, mem};
use body::Binary;
/// Internal use only! unsafe
#[derive(Debug)]
pub(crate) struct SharedBytesPool(RefCell<VecDeque<Rc<BytesMut>>>);
@ -34,8 +33,7 @@ impl SharedBytesPool {
}
#[derive(Debug)]
pub(crate) struct SharedBytes(
Option<Rc<BytesMut>>, Option<Rc<SharedBytesPool>>);
pub(crate) struct SharedBytes(Option<Rc<BytesMut>>, Option<Rc<SharedBytesPool>>);
impl Drop for SharedBytes {
fn drop(&mut self) {
@ -50,7 +48,6 @@ impl Drop for SharedBytes {
}
impl SharedBytes {
pub fn empty() -> Self {
SharedBytes(None, None)
}
@ -64,7 +61,7 @@ impl SharedBytes {
#[cfg_attr(feature = "cargo-clippy", allow(mut_from_ref, inline_always))]
pub(crate) fn get_mut(&self) -> &mut BytesMut {
let r: &BytesMut = self.0.as_ref().unwrap().as_ref();
unsafe{mem::transmute(r)}
unsafe { mem::transmute(r) }
}
#[inline]


@ -1,31 +1,33 @@
use std::{io, net, thread};
use std::rc::Rc;
use std::sync::{Arc, mpsc as sync_mpsc};
use std::sync::{mpsc as sync_mpsc, Arc};
use std::time::Duration;
use std::{io, net, thread};
use actix::prelude::*;
use actix::actors::signal;
use futures::{Future, Sink, Stream};
use actix::prelude::*;
use futures::sync::mpsc;
use tokio_io::{AsyncRead, AsyncWrite};
use futures::{Future, Sink, Stream};
use mio;
use num_cpus;
use net2::TcpBuilder;
use num_cpus;
use tokio_io::{AsyncRead, AsyncWrite};
#[cfg(feature="tls")]
#[cfg(feature = "tls")]
use native_tls::TlsAcceptor;
#[cfg(feature="alpn")]
#[cfg(feature = "alpn")]
use openssl::ssl::{AlpnError, SslAcceptorBuilder};
use super::channel::{HttpChannel, WrapperStream};
use super::settings::{ServerSettings, WorkerSettings};
use super::worker::{Conn, StopWorker, StreamHandlerType, Worker};
use super::{IntoHttpHandler, IoStream, KeepAlive};
use super::{PauseServer, ResumeServer, StopServer};
use super::channel::{HttpChannel, WrapperStream};
use super::worker::{Conn, Worker, StreamHandlerType, StopWorker};
use super::settings::{ServerSettings, WorkerSettings};
/// An HTTP Server
pub struct HttpServer<H> where H: IntoHttpHandler + 'static
pub struct HttpServer<H>
where
H: IntoHttpHandler + 'static,
{
h: Option<Rc<WorkerSettings<H::Handler>>>,
threads: usize,
@ -33,7 +35,7 @@ pub struct HttpServer<H> where H: IntoHttpHandler + 'static
host: Option<String>,
keep_alive: KeepAlive,
factory: Arc<Fn() -> Vec<H> + Send + Sync>,
#[cfg_attr(feature="cargo-clippy", allow(type_complexity))]
#[cfg_attr(feature = "cargo-clippy", allow(type_complexity))]
workers: Vec<(usize, Addr<Syn, Worker<H::Handler>>)>,
sockets: Vec<(net::SocketAddr, net::TcpListener)>,
accept: Vec<(mio::SetReadiness, sync_mpsc::Sender<Command>)>,
@ -44,8 +46,16 @@ pub struct HttpServer<H> where H: IntoHttpHandler + 'static
no_signals: bool,
}
unsafe impl<H> Sync for HttpServer<H> where H: IntoHttpHandler {}
unsafe impl<H> Send for HttpServer<H> where H: IntoHttpHandler {}
unsafe impl<H> Sync for HttpServer<H>
where
H: IntoHttpHandler,
{
}
unsafe impl<H> Send for HttpServer<H>
where
H: IntoHttpHandler,
{
}
#[derive(Clone)]
struct Info {
@ -57,41 +67,47 @@ enum ServerCommand {
WorkerDied(usize, Info),
}
impl<H> Actor for HttpServer<H> where H: IntoHttpHandler {
impl<H> Actor for HttpServer<H>
where
H: IntoHttpHandler,
{
type Context = Context<Self>;
}
impl<H> HttpServer<H> where H: IntoHttpHandler + 'static
impl<H> HttpServer<H>
where
H: IntoHttpHandler + 'static,
{
/// Create new http server with application factory
pub fn new<F, U>(factory: F) -> Self
where F: Fn() -> U + Sync + Send + 'static,
U: IntoIterator<Item=H> + 'static,
where
F: Fn() -> U + Sync + Send + 'static,
U: IntoIterator<Item = H> + 'static,
{
let f = move || {
(factory)().into_iter().collect()
};
let f = move || (factory)().into_iter().collect();
HttpServer{ h: None,
threads: num_cpus::get(),
backlog: 2048,
host: None,
keep_alive: KeepAlive::Os,
factory: Arc::new(f),
workers: Vec::new(),
sockets: Vec::new(),
accept: Vec::new(),
exit: false,
shutdown_timeout: 30,
signals: None,
no_http2: false,
no_signals: false,
HttpServer {
h: None,
threads: num_cpus::get(),
backlog: 2048,
host: None,
keep_alive: KeepAlive::Os,
factory: Arc::new(f),
workers: Vec::new(),
sockets: Vec::new(),
accept: Vec::new(),
exit: false,
shutdown_timeout: 30,
signals: None,
no_http2: false,
no_signals: false,
}
}
/// Set number of workers to start.
///
/// By default the http server uses the number of available logical cpus as the thread count.
/// By default the http server uses the number of available logical cpus as
/// the thread count.
pub fn threads(mut self, num: usize) -> Self {
self.threads = num;
self
@ -101,7 +117,8 @@ impl<H> HttpServer<H> where H: IntoHttpHandler + 'static
///
/// This refers to the number of clients that can be waiting to be served.
/// Exceeding this number results in the client getting an error when
/// attempting to connect. It should only affect servers under significant load.
/// attempting to connect. It should only affect servers under significant
/// load.
///
/// Generally set in the 64-2048 range. Default value is 2048.
///
@ -121,9 +138,9 @@ impl<H> HttpServer<H> where H: IntoHttpHandler + 'static
/// Set server host name.
///
/// Host name is used by application router as a hostname for url generation.
/// Check [ConnectionInfo](./dev/struct.ConnectionInfo.html#method.host) documentation
/// for more information.
/// Host name is used by application router as a hostname for url
/// generation. Check [ConnectionInfo](./dev/struct.ConnectionInfo.
/// html#method.host) documentation for more information.
pub fn server_hostname(mut self, val: String) -> Self {
self.host = Some(val);
self
@ -152,8 +169,9 @@ impl<H> HttpServer<H> where H: IntoHttpHandler + 'static
/// Timeout for graceful workers shutdown.
///
/// After receiving a stop signal, workers have this much time to finish serving requests.
/// Workers still alive after the timeout are force dropped.
/// After receiving a stop signal, workers have this much time to finish
/// serving requests. Workers still alive after the timeout are force
/// dropped.
///
/// By default the shutdown timeout is set to 30 seconds.
pub fn shutdown_timeout(mut self, sec: u16) -> Self {
@ -192,7 +210,7 @@ impl<H> HttpServer<H> where H: IntoHttpHandler + 'static
Ok(lst) => {
succ = true;
self.sockets.push((lst.local_addr().unwrap(), lst));
},
}
Err(e) => err = Some(e),
}
}
@ -201,16 +219,19 @@ impl<H> HttpServer<H> where H: IntoHttpHandler + 'static
if let Some(e) = err.take() {
Err(e)
} else {
Err(io::Error::new(io::ErrorKind::Other, "Can not bind to address."))
Err(io::Error::new(
io::ErrorKind::Other,
"Can not bind to address.",
))
}
} else {
Ok(self)
}
}
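Taken together, the constructor and the builder methods in this impl compose as shown below. This is only an illustrative sketch against the pre-1.0 API visible in this diff; the actix_web import paths, App::new() and the inline handler are assumptions and may differ between releases:
extern crate actix;
extern crate actix_web;
use actix_web::App;
use actix_web::server::HttpServer;
fn main() {
    let sys = actix::System::new("example");
    HttpServer::new(|| vec![App::new().resource("/", |r| r.f(|_| "Hello"))])
        .threads(4)           // worker threads, defaults to the number of logical cpus
        .backlog(1024)        // listen backlog for pending connections
        .shutdown_timeout(10) // seconds workers get after a stop signal
        .bind("127.0.0.1:8080")
        .expect("Can not bind to 127.0.0.1:8080")
        .start();
    let _ = sys.run(); // run the actix system; this starts all async processes
}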
fn start_workers(&mut self, settings: &ServerSettings, handler: &StreamHandlerType)
-> Vec<(usize, mpsc::UnboundedSender<Conn<net::TcpStream>>)>
{
fn start_workers(
&mut self, settings: &ServerSettings, handler: &StreamHandlerType
) -> Vec<(usize, mpsc::UnboundedSender<Conn<net::TcpStream>>)> {
// start workers
let mut workers = Vec::new();
for idx in 0..self.threads {
@ -223,7 +244,8 @@ impl<H> HttpServer<H> where H: IntoHttpHandler + 'static
let addr = Arbiter::start(move |ctx: &mut Context<_>| {
let apps: Vec<_> = (*factory)()
.into_iter()
.map(|h| h.into_handler(s.clone())).collect();
.map(|h| h.into_handler(s.clone()))
.collect();
ctx.add_message_stream(rx);
Worker::new(apps, h, ka)
});
@ -248,12 +270,12 @@ impl<H> HttpServer<H> where H: IntoHttpHandler + 'static
}
}
impl<H: IntoHttpHandler> HttpServer<H>
{
impl<H: IntoHttpHandler> HttpServer<H> {
/// Start listening for incoming connections.
///
/// This method starts a number of http handler workers in separate threads.
/// For each address this method starts a separate thread which does `accept()` in a loop.
/// For each address this method starts a separate thread which does
/// `accept()` in a loop.
///
/// This method panics if no socket addresses get bound.
///
@ -277,8 +299,7 @@ impl<H: IntoHttpHandler> HttpServer<H>
/// let _ = sys.run(); // <- Run actix system, this method actually starts all async processes
/// }
/// ```
pub fn start(mut self) -> Addr<Syn, Self>
{
pub fn start(mut self) -> Addr<Syn, Self> {
if self.sockets.is_empty() {
panic!("HttpServer::bind() has to be called before start()");
} else {
@ -287,15 +308,22 @@ impl<H: IntoHttpHandler> HttpServer<H>
self.sockets.drain(..).collect();
let settings = ServerSettings::new(Some(addrs[0].0), &self.host, false);
let workers = self.start_workers(&settings, &StreamHandlerType::Normal);
let info = Info{addr: addrs[0].0, handler: StreamHandlerType::Normal};
let info = Info {
addr: addrs[0].0,
handler: StreamHandlerType::Normal,
};
// start acceptors threads
for (addr, sock) in addrs {
info!("Starting server on http://{}", addr);
self.accept.push(
start_accept_thread(
sock, addr, self.backlog,
tx.clone(), info.clone(), workers.clone()));
self.accept.push(start_accept_thread(
sock,
addr,
self.backlog,
tx.clone(),
info.clone(),
workers.clone(),
));
}
// start http server actor
@ -304,16 +332,17 @@ impl<H: IntoHttpHandler> HttpServer<H>
ctx.add_stream(rx);
self
});
signals.map(|signals| signals.do_send(
signal::Subscribe(addr.clone().recipient())));
signals.map(|signals| {
signals.do_send(signal::Subscribe(addr.clone().recipient()))
});
addr
}
}
/// Spawn new thread and start listening for incoming connections.
///
/// This method spawns a new thread and starts a new actix system. Other than that it is
/// similar to the `start()` method. This method blocks.
/// This method spawns a new thread and starts a new actix system. Other
/// than that it is similar to the `start()` method. This method blocks.
///
/// This method panics if no socket addresses get bound.
///
@ -344,28 +373,38 @@ impl<H: IntoHttpHandler> HttpServer<H>
}
}
#[cfg(feature="tls")]
impl<H: IntoHttpHandler> HttpServer<H>
{
#[cfg(feature = "tls")]
impl<H: IntoHttpHandler> HttpServer<H> {
/// Start listening for incoming tls connections.
pub fn start_tls(mut self, acceptor: TlsAcceptor) -> io::Result<Addr<Syn, Self>> {
if self.sockets.is_empty() {
Err(io::Error::new(io::ErrorKind::Other, "No socket addresses are bound"))
Err(io::Error::new(
io::ErrorKind::Other,
"No socket addresses are bound",
))
} else {
let (tx, rx) = mpsc::unbounded();
let addrs: Vec<(net::SocketAddr, net::TcpListener)> = self.sockets.drain(..).collect();
let addrs: Vec<(net::SocketAddr, net::TcpListener)> =
self.sockets.drain(..).collect();
let settings = ServerSettings::new(Some(addrs[0].0), &self.host, false);
let workers = self.start_workers(
&settings, &StreamHandlerType::Tls(acceptor.clone()));
let info = Info{addr: addrs[0].0, handler: StreamHandlerType::Tls(acceptor)};
let workers =
self.start_workers(&settings, &StreamHandlerType::Tls(acceptor.clone()));
let info = Info {
addr: addrs[0].0,
handler: StreamHandlerType::Tls(acceptor),
};
// start acceptors threads
for (addr, sock) in addrs {
info!("Starting server on https://{}", addr);
self.accept.push(
start_accept_thread(
sock, addr, self.backlog,
tx.clone(), info.clone(), workers.clone()));
self.accept.push(start_accept_thread(
sock,
addr,
self.backlog,
tx.clone(),
info.clone(),
workers.clone(),
));
}
// start http server actor
@ -374,23 +413,27 @@ impl<H: IntoHttpHandler> HttpServer<H>
ctx.add_stream(rx);
self
});
signals.map(|signals| signals.do_send(
signal::Subscribe(addr.clone().recipient())));
signals.map(|signals| {
signals.do_send(signal::Subscribe(addr.clone().recipient()))
});
Ok(addr)
}
}
}
#[cfg(feature="alpn")]
impl<H: IntoHttpHandler> HttpServer<H>
{
#[cfg(feature = "alpn")]
impl<H: IntoHttpHandler> HttpServer<H> {
/// Start listening for incoming tls connections.
///
/// This method sets alpn protocols to "h2" and "http/1.1"
pub fn start_ssl(mut self, mut builder: SslAcceptorBuilder) -> io::Result<Addr<Syn, Self>>
{
pub fn start_ssl(
mut self, mut builder: SslAcceptorBuilder
) -> io::Result<Addr<Syn, Self>> {
if self.sockets.is_empty() {
Err(io::Error::new(io::ErrorKind::Other, "No socket addresses are bound"))
Err(io::Error::new(
io::ErrorKind::Other,
"No socket addresses are bound",
))
} else {
// alpn support
if !self.no_http2 {
@ -407,19 +450,29 @@ impl<H: IntoHttpHandler> HttpServer<H>
let (tx, rx) = mpsc::unbounded();
let acceptor = builder.build();
let addrs: Vec<(net::SocketAddr, net::TcpListener)> = self.sockets.drain(..).collect();
let addrs: Vec<(net::SocketAddr, net::TcpListener)> =
self.sockets.drain(..).collect();
let settings = ServerSettings::new(Some(addrs[0].0), &self.host, false);
let workers = self.start_workers(
&settings, &StreamHandlerType::Alpn(acceptor.clone()));
let info = Info{addr: addrs[0].0, handler: StreamHandlerType::Alpn(acceptor)};
&settings,
&StreamHandlerType::Alpn(acceptor.clone()),
);
let info = Info {
addr: addrs[0].0,
handler: StreamHandlerType::Alpn(acceptor),
};
// start acceptors threads
for (addr, sock) in addrs {
info!("Starting server on https://{}", addr);
self.accept.push(
start_accept_thread(
sock, addr, self.backlog,
tx.clone(), info.clone(), workers.clone()));
self.accept.push(start_accept_thread(
sock,
addr,
self.backlog,
tx.clone(),
info.clone(),
workers.clone(),
));
}
// start http server actor
@ -428,22 +481,23 @@ impl<H: IntoHttpHandler> HttpServer<H>
ctx.add_stream(rx);
self
});
signals.map(|signals| signals.do_send(
signal::Subscribe(addr.clone().recipient())));
signals.map(|signals| {
signals.do_send(signal::Subscribe(addr.clone().recipient()))
});
Ok(addr)
}
}
}
impl<H: IntoHttpHandler> HttpServer<H>
{
impl<H: IntoHttpHandler> HttpServer<H> {
/// Start listening for incoming connections from a stream.
///
/// This method uses only one thread for handling incoming connections.
pub fn start_incoming<T, A, S>(mut self, stream: S, secure: bool) -> Addr<Syn, Self>
where S: Stream<Item=(T, A), Error=io::Error> + 'static,
T: AsyncRead + AsyncWrite + 'static,
A: 'static
where
S: Stream<Item = (T, A), Error = io::Error> + 'static,
T: AsyncRead + AsyncWrite + 'static,
A: 'static,
{
let (tx, rx) = mpsc::unbounded();
@ -452,15 +506,22 @@ impl<H: IntoHttpHandler> HttpServer<H>
self.sockets.drain(..).collect();
let settings = ServerSettings::new(Some(addrs[0].0), &self.host, false);
let workers = self.start_workers(&settings, &StreamHandlerType::Normal);
let info = Info{addr: addrs[0].0, handler: StreamHandlerType::Normal};
let info = Info {
addr: addrs[0].0,
handler: StreamHandlerType::Normal,
};
// start acceptors threads
for (addr, sock) in addrs {
info!("Starting server on http://{}", addr);
self.accept.push(
start_accept_thread(
sock, addr, self.backlog,
tx.clone(), info.clone(), workers.clone()));
self.accept.push(start_accept_thread(
sock,
addr,
self.backlog,
tx.clone(),
info.clone(),
workers.clone(),
));
}
}
@ -468,21 +529,24 @@ impl<H: IntoHttpHandler> HttpServer<H>
let addr: net::SocketAddr = "127.0.0.1:8080".parse().unwrap();
let settings = ServerSettings::new(Some(addr), &self.host, secure);
let apps: Vec<_> = (*self.factory)()
.into_iter().map(|h| h.into_handler(settings.clone())).collect();
.into_iter()
.map(|h| h.into_handler(settings.clone()))
.collect();
self.h = Some(Rc::new(WorkerSettings::new(apps, self.keep_alive)));
// start server
let signals = self.subscribe_to_signals();
let addr: Addr<Syn, _> = HttpServer::create(move |ctx| {
ctx.add_stream(rx);
ctx.add_message_stream(
stream
.map_err(|_| ())
.map(move |(t, _)| Conn{io: WrapperStream::new(t), peer: None, http2: false}));
ctx.add_message_stream(stream.map_err(|_| ()).map(move |(t, _)| Conn {
io: WrapperStream::new(t),
peer: None,
http2: false,
}));
self
});
signals.map(|signals| signals.do_send(
signal::Subscribe(addr.clone().recipient())));
signals
.map(|signals| signals.do_send(signal::Subscribe(addr.clone().recipient())));
addr
}
}
@ -490,8 +554,7 @@ impl<H: IntoHttpHandler> HttpServer<H>
/// Signals support
/// Handle `SIGINT`, `SIGTERM`, `SIGQUIT` signals and send `SystemExit(0)`
/// message to `System` actor.
impl<H: IntoHttpHandler> Handler<signal::Signal> for HttpServer<H>
{
impl<H: IntoHttpHandler> Handler<signal::Signal> for HttpServer<H> {
type Result = ();
fn handle(&mut self, msg: signal::Signal, ctx: &mut Context<Self>) {
@ -499,17 +562,17 @@ impl<H: IntoHttpHandler> Handler<signal::Signal> for HttpServer<H>
signal::SignalType::Int => {
info!("SIGINT received, exiting");
self.exit = true;
Handler::<StopServer>::handle(self, StopServer{graceful: false}, ctx);
Handler::<StopServer>::handle(self, StopServer { graceful: false }, ctx);
}
signal::SignalType::Term => {
info!("SIGTERM received, stopping");
self.exit = true;
Handler::<StopServer>::handle(self, StopServer{graceful: true}, ctx);
Handler::<StopServer>::handle(self, StopServer { graceful: true }, ctx);
}
signal::SignalType::Quit => {
info!("SIGQUIT received, exiting");
self.exit = true;
Handler::<StopServer>::handle(self, StopServer{graceful: false}, ctx);
Handler::<StopServer>::handle(self, StopServer { graceful: false }, ctx);
}
_ => (),
}
@ -517,8 +580,7 @@ impl<H: IntoHttpHandler> Handler<signal::Signal> for HttpServer<H>
}
/// Commands from accept threads
impl<H: IntoHttpHandler> StreamHandler<ServerCommand, ()> for HttpServer<H>
{
impl<H: IntoHttpHandler> StreamHandler<ServerCommand, ()> for HttpServer<H> {
fn finished(&mut self, _: &mut Context<Self>) {}
fn handle(&mut self, msg: ServerCommand, _: &mut Context<Self>) {
match msg {
@ -528,7 +590,7 @@ impl<H: IntoHttpHandler> StreamHandler<ServerCommand, ()> for HttpServer<H>
if self.workers[i].0 == idx {
self.workers.swap_remove(i);
found = true;
break
break;
}
}
@ -541,21 +603,23 @@ impl<H: IntoHttpHandler> StreamHandler<ServerCommand, ()> for HttpServer<H>
for i in 0..self.workers.len() {
if self.workers[i].0 == new_idx {
new_idx += 1;
continue 'found
continue 'found;
}
}
break
break;
}
let h = info.handler;
let ka = self.keep_alive;
let factory = Arc::clone(&self.factory);
let settings = ServerSettings::new(Some(info.addr), &self.host, false);
let settings =
ServerSettings::new(Some(info.addr), &self.host, false);
let addr = Arbiter::start(move |ctx: &mut Context<_>| {
let apps: Vec<_> = (*factory)()
.into_iter()
.map(|h| h.into_handler(settings.clone())).collect();
.map(|h| h.into_handler(settings.clone()))
.collect();
ctx.add_message_stream(rx);
Worker::new(apps, h, ka)
});
@ -566,30 +630,32 @@ impl<H: IntoHttpHandler> StreamHandler<ServerCommand, ()> for HttpServer<H>
self.workers.push((new_idx, addr));
}
},
}
}
}
}
impl<T, H> Handler<Conn<T>> for HttpServer<H>
where T: IoStream,
H: IntoHttpHandler,
where
T: IoStream,
H: IntoHttpHandler,
{
type Result = ();
fn handle(&mut self, msg: Conn<T>, _: &mut Context<Self>) -> Self::Result {
Arbiter::handle().spawn(
HttpChannel::new(
Rc::clone(self.h.as_ref().unwrap()), msg.io, msg.peer, msg.http2));
Arbiter::handle().spawn(HttpChannel::new(
Rc::clone(self.h.as_ref().unwrap()),
msg.io,
msg.peer,
msg.http2,
));
}
}
impl<H: IntoHttpHandler> Handler<PauseServer> for HttpServer<H>
{
impl<H: IntoHttpHandler> Handler<PauseServer> for HttpServer<H> {
type Result = ();
fn handle(&mut self, _: PauseServer, _: &mut Context<Self>)
{
fn handle(&mut self, _: PauseServer, _: &mut Context<Self>) {
for item in &self.accept {
let _ = item.1.send(Command::Pause);
let _ = item.0.set_readiness(mio::Ready::readable());
@ -597,8 +663,7 @@ impl<H: IntoHttpHandler> Handler<PauseServer> for HttpServer<H>
}
}
impl<H: IntoHttpHandler> Handler<ResumeServer> for HttpServer<H>
{
impl<H: IntoHttpHandler> Handler<ResumeServer> for HttpServer<H> {
type Result = ();
fn handle(&mut self, _: ResumeServer, _: &mut Context<Self>) {
@ -609,8 +674,7 @@ impl<H: IntoHttpHandler> Handler<ResumeServer> for HttpServer<H>
}
}
impl<H: IntoHttpHandler> Handler<StopServer> for HttpServer<H>
{
impl<H: IntoHttpHandler> Handler<StopServer> for HttpServer<H> {
type Result = actix::Response<(), ()>;
fn handle(&mut self, msg: StopServer, ctx: &mut Context<Self>) -> Self::Result {
@ -630,7 +694,9 @@ impl<H: IntoHttpHandler> Handler<StopServer> for HttpServer<H>
};
for worker in &self.workers {
let tx2 = tx.clone();
worker.1.send(StopWorker{graceful: dur})
worker
.1
.send(StopWorker { graceful: dur })
.into_actor(self)
.then(move |_, slf, ctx| {
slf.workers.pop();
@ -645,12 +711,12 @@ impl<H: IntoHttpHandler> Handler<StopServer> for HttpServer<H>
}
}
actix::fut::ok(())
}).spawn(ctx);
})
.spawn(ctx);
}
if !self.workers.is_empty() {
Response::async(
rx.into_future().map(|_| ()).map_err(|_| ()))
Response::async(rx.into_future().map(|_| ()).map_err(|_| ()))
} else {
// we need to stop system if server was spawned
if self.exit {
@ -673,156 +739,184 @@ enum Command {
fn start_accept_thread(
sock: net::TcpListener, addr: net::SocketAddr, backlog: i32,
srv: mpsc::UnboundedSender<ServerCommand>, info: Info,
mut workers: Vec<(usize, mpsc::UnboundedSender<Conn<net::TcpStream>>)>)
-> (mio::SetReadiness, sync_mpsc::Sender<Command>)
{
mut workers: Vec<(usize, mpsc::UnboundedSender<Conn<net::TcpStream>>)>,
) -> (mio::SetReadiness, sync_mpsc::Sender<Command>) {
let (tx, rx) = sync_mpsc::channel();
let (reg, readiness) = mio::Registration::new2();
// start accept thread
#[cfg_attr(feature="cargo-clippy", allow(cyclomatic_complexity))]
let _ = thread::Builder::new().name(format!("Accept on {}", addr)).spawn(move || {
const SRV: mio::Token = mio::Token(0);
const CMD: mio::Token = mio::Token(1);
#[cfg_attr(feature = "cargo-clippy", allow(cyclomatic_complexity))]
let _ = thread::Builder::new()
.name(format!("Accept on {}", addr))
.spawn(move || {
const SRV: mio::Token = mio::Token(0);
const CMD: mio::Token = mio::Token(1);
let mut server = Some(
mio::net::TcpListener::from_std(sock)
.expect("Can not create mio::net::TcpListener"));
let mut server = Some(
mio::net::TcpListener::from_std(sock)
.expect("Can not create mio::net::TcpListener"),
);
// Create a poll instance
let poll = match mio::Poll::new() {
Ok(poll) => poll,
Err(err) => panic!("Can not create mio::Poll: {}", err),
};
// Create a poll instance
let poll = match mio::Poll::new() {
Ok(poll) => poll,
Err(err) => panic!("Can not create mio::Poll: {}", err),
};
// Start listening for incoming connections
if let Some(ref srv) = server {
if let Err(err) = poll.register(
srv, SRV, mio::Ready::readable(), mio::PollOpt::edge()) {
panic!("Can not register io: {}", err);
}
}
// Start listening for incoming commands
if let Err(err) = poll.register(&reg, CMD,
mio::Ready::readable(), mio::PollOpt::edge()) {
panic!("Can not register Registration: {}", err);
}
// Create storage for events
let mut events = mio::Events::with_capacity(128);
// Sleep on error
let sleep = Duration::from_millis(100);
let mut next = 0;
loop {
if let Err(err) = poll.poll(&mut events, None) {
panic!("Poll error: {}", err);
}
for event in events.iter() {
match event.token() {
SRV => if let Some(ref server) = server {
loop {
match server.accept_std() {
Ok((sock, addr)) => {
let mut msg = Conn{
io: sock, peer: Some(addr), http2: false};
while !workers.is_empty() {
match workers[next].1.unbounded_send(msg) {
Ok(_) => (),
Err(err) => {
let _ = srv.unbounded_send(
ServerCommand::WorkerDied(
workers[next].0, info.clone()));
msg = err.into_inner();
workers.swap_remove(next);
if workers.is_empty() {
error!("No workers");
thread::sleep(sleep);
break
} else if workers.len() <= next {
next = 0;
}
continue
}
}
next = (next + 1) % workers.len();
break
}
},
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock =>
break,
Err(ref e) if connection_error(e) =>
continue,
Err(e) => {
error!("Error accepting connection: {}", e);
// sleep after error
thread::sleep(sleep);
break
}
}
}
},
CMD => match rx.try_recv() {
Ok(cmd) => match cmd {
Command::Pause => if let Some(server) = server.take() {
if let Err(err) = poll.deregister(&server) {
error!("Can not deregister server socket {}", err);
} else {
info!("Paused accepting connections on {}", addr);
}
},
Command::Resume => {
let lst = create_tcp_listener(addr, backlog)
.expect("Can not create net::TcpListener");
server = Some(
mio::net::TcpListener::from_std(lst)
.expect("Can not create mio::net::TcpListener"));
if let Some(ref server) = server {
if let Err(err) = poll.register(
server, SRV, mio::Ready::readable(), mio::PollOpt::edge())
{
error!("Can not resume socket accept process: {}", err);
} else {
info!("Accepting connections on {} has been resumed",
addr);
}
}
},
Command::Stop => {
if let Some(server) = server.take() {
let _ = poll.deregister(&server);
}
return
},
Command::Worker(idx, addr) => {
workers.push((idx, addr));
},
},
Err(err) => match err {
sync_mpsc::TryRecvError::Empty => (),
sync_mpsc::TryRecvError::Disconnected => {
if let Some(server) = server.take() {
let _ = poll.deregister(&server);
}
return
},
}
},
_ => unreachable!(),
// Start listening for incoming connections
if let Some(ref srv) = server {
if let Err(err) =
poll.register(srv, SRV, mio::Ready::readable(), mio::PollOpt::edge())
{
panic!("Can not register io: {}", err);
}
}
}
});
// Start listening for incoming commands
if let Err(err) = poll.register(
&reg,
CMD,
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register Registration: {}", err);
}
// Create storage for events
let mut events = mio::Events::with_capacity(128);
// Sleep on error
let sleep = Duration::from_millis(100);
let mut next = 0;
loop {
if let Err(err) = poll.poll(&mut events, None) {
panic!("Poll error: {}", err);
}
for event in events.iter() {
match event.token() {
SRV => if let Some(ref server) = server {
loop {
match server.accept_std() {
Ok((sock, addr)) => {
let mut msg = Conn {
io: sock,
peer: Some(addr),
http2: false,
};
while !workers.is_empty() {
match workers[next].1.unbounded_send(msg) {
Ok(_) => (),
Err(err) => {
let _ = srv.unbounded_send(
ServerCommand::WorkerDied(
workers[next].0,
info.clone(),
),
);
msg = err.into_inner();
workers.swap_remove(next);
if workers.is_empty() {
error!("No workers");
thread::sleep(sleep);
break;
} else if workers.len() <= next {
next = 0;
}
continue;
}
}
next = (next + 1) % workers.len();
break;
}
}
Err(ref e)
if e.kind() == io::ErrorKind::WouldBlock =>
{
break
}
Err(ref e) if connection_error(e) => continue,
Err(e) => {
error!("Error accepting connection: {}", e);
// sleep after error
thread::sleep(sleep);
break;
}
}
}
},
CMD => match rx.try_recv() {
Ok(cmd) => match cmd {
Command::Pause => if let Some(server) = server.take() {
if let Err(err) = poll.deregister(&server) {
error!(
"Can not deregister server socket {}",
err
);
} else {
info!(
"Paused accepting connections on {}",
addr
);
}
},
Command::Resume => {
let lst = create_tcp_listener(addr, backlog)
.expect("Can not create net::TcpListener");
server = Some(
mio::net::TcpListener::from_std(lst).expect(
"Can not create mio::net::TcpListener",
),
);
if let Some(ref server) = server {
if let Err(err) = poll.register(
server,
SRV,
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
error!("Can not resume socket accept process: {}", err);
} else {
info!("Accepting connections on {} has been resumed",
addr);
}
}
}
Command::Stop => {
if let Some(server) = server.take() {
let _ = poll.deregister(&server);
}
return;
}
Command::Worker(idx, addr) => {
workers.push((idx, addr));
}
},
Err(err) => match err {
sync_mpsc::TryRecvError::Empty => (),
sync_mpsc::TryRecvError::Disconnected => {
if let Some(server) = server.take() {
let _ = poll.deregister(&server);
}
return;
}
},
},
_ => unreachable!(),
}
}
}
});
(readiness, tx)
}
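For context, the (mio::SetReadiness, sync_mpsc::Sender<Command>) pair returned here is what the PauseServer, ResumeServer and StopServer handlers above drive: they push a Command down the channel first and then flip the paired registration to readable, so the accept thread's poll loop wakes up on the CMD token and reads the command. A minimal sketch of that pattern, with an illustrative helper name:
// Sketch only: same two steps as the PauseServer handler above.
fn pause_acceptor(item: &(mio::SetReadiness, sync_mpsc::Sender<Command>)) {
    // 1. queue the command for the accept thread
    let _ = item.1.send(Command::Pause);
    // 2. wake the poll loop so the CMD token fires and the command is handled
    let _ = item.0.set_readiness(mio::Ready::readable());
}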
fn create_tcp_listener(addr: net::SocketAddr, backlog: i32) -> io::Result<net::TcpListener> {
fn create_tcp_listener(
addr: net::SocketAddr, backlog: i32
) -> io::Result<net::TcpListener> {
let builder = match addr {
net::SocketAddr::V4(_) => TcpBuilder::new_v4()?,
net::SocketAddr::V6(_) => TcpBuilder::new_v6()?,
@ -840,7 +934,7 @@ fn create_tcp_listener(addr: net::SocketAddr, backlog: i32) -> io::Result<net::T
/// The timeout is useful to handle resource exhaustion errors like ENFILE
/// and EMFILE. Otherwise, the accept loop could enter a tight loop.
fn connection_error(e: &io::Error) -> bool {
e.kind() == io::ErrorKind::ConnectionRefused ||
e.kind() == io::ErrorKind::ConnectionAborted ||
e.kind() == io::ErrorKind::ConnectionReset
e.kind() == io::ErrorKind::ConnectionRefused
|| e.kind() == io::ErrorKind::ConnectionAborted
|| e.kind() == io::ErrorKind::ConnectionReset
}


@ -1,14 +1,15 @@
use std::io;
use bytes::{BytesMut, BufMut};
use bytes::{BufMut, BytesMut};
use futures::{Async, Poll};
use std::io;
use super::IoStream;
const LW_BUFFER_SIZE: usize = 4096;
const HW_BUFFER_SIZE: usize = 32_768;
pub fn read_from_io<T: IoStream>(io: &mut T, buf: &mut BytesMut) -> Poll<usize, io::Error> {
pub fn read_from_io<T: IoStream>(
io: &mut T, buf: &mut BytesMut
) -> Poll<usize, io::Error> {
unsafe {
if buf.remaining_mut() < LW_BUFFER_SIZE {
buf.reserve(HW_BUFFER_SIZE);
@ -17,7 +18,7 @@ pub fn read_from_io<T: IoStream>(io: &mut T, buf: &mut BytesMut) -> Poll<usize,
Ok(n) => {
buf.advance_mut(n);
Ok(Async::Ready(n))
},
}
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
Ok(Async::NotReady)


@ -1,31 +1,30 @@
use std::{net, time};
use std::rc::Rc;
use futures::Future;
use futures::unsync::oneshot;
use net2::TcpStreamExt;
use std::rc::Rc;
use std::{net, time};
use tokio_core::net::TcpStream;
use tokio_core::reactor::Handle;
use net2::TcpStreamExt;
#[cfg(any(feature="tls", feature="alpn"))]
#[cfg(any(feature = "tls", feature = "alpn"))]
use futures::future;
#[cfg(feature="tls")]
#[cfg(feature = "tls")]
use native_tls::TlsAcceptor;
#[cfg(feature="tls")]
#[cfg(feature = "tls")]
use tokio_tls::TlsAcceptorExt;
#[cfg(feature="alpn")]
#[cfg(feature = "alpn")]
use openssl::ssl::SslAcceptor;
#[cfg(feature="alpn")]
#[cfg(feature = "alpn")]
use tokio_openssl::SslAcceptorExt;
use actix::*;
use actix::msgs::StopArbiter;
use actix::*;
use server::{HttpHandler, KeepAlive};
use server::channel::HttpChannel;
use server::settings::WorkerSettings;
use server::{HttpHandler, KeepAlive};
#[derive(Message)]
pub(crate) struct Conn<T> {
@ -46,9 +45,12 @@ impl Message for StopWorker {
/// Http worker
///
/// Worker accepts Socket objects via unbounded channel and starts request processing.
pub(crate)
struct Worker<H> where H: HttpHandler + 'static {
/// Worker accepts Socket objects via unbounded channel and starts request
/// processing.
pub(crate) struct Worker<H>
where
H: HttpHandler + 'static,
{
settings: Rc<WorkerSettings<H>>,
hnd: Handle,
handler: StreamHandlerType,
@ -56,10 +58,9 @@ struct Worker<H> where H: HttpHandler + 'static {
}
impl<H: HttpHandler + 'static> Worker<H> {
pub(crate) fn new(h: Vec<H>, handler: StreamHandlerType, keep_alive: KeepAlive)
-> Worker<H>
{
pub(crate) fn new(
h: Vec<H>, handler: StreamHandlerType, keep_alive: KeepAlive
) -> Worker<H> {
let tcp_ka = if let KeepAlive::Tcp(val) = keep_alive {
Some(time::Duration::new(val as u64, 0))
} else {
@ -76,11 +77,14 @@ impl<H: HttpHandler + 'static> Worker<H> {
fn update_time(&self, ctx: &mut Context<Self>) {
self.settings.update_date();
ctx.run_later(time::Duration::new(1, 0), |slf, ctx| slf.update_time(ctx));
ctx.run_later(time::Duration::new(1, 0), |slf, ctx| {
slf.update_time(ctx)
});
}
fn shutdown_timeout(&self, ctx: &mut Context<Self>,
tx: oneshot::Sender<bool>, dur: time::Duration) {
fn shutdown_timeout(
&self, ctx: &mut Context<Self>, tx: oneshot::Sender<bool>, dur: time::Duration
) {
// sleep for 1 second and then check again
ctx.run_later(time::Duration::new(1, 0), move |slf, ctx| {
let num = slf.settings.num_channels();
@ -99,7 +103,10 @@ impl<H: HttpHandler + 'static> Worker<H> {
}
}
impl<H: 'static> Actor for Worker<H> where H: HttpHandler + 'static {
impl<H: 'static> Actor for Worker<H>
where
H: HttpHandler + 'static,
{
type Context = Context<Self>;
fn started(&mut self, ctx: &mut Self::Context) {
@ -108,22 +115,24 @@ impl<H: 'static> Actor for Worker<H> where H: HttpHandler + 'static {
}
impl<H> Handler<Conn<net::TcpStream>> for Worker<H>
where H: HttpHandler + 'static,
where
H: HttpHandler + 'static,
{
type Result = ();
fn handle(&mut self, msg: Conn<net::TcpStream>, _: &mut Context<Self>)
{
fn handle(&mut self, msg: Conn<net::TcpStream>, _: &mut Context<Self>) {
if self.tcp_ka.is_some() && msg.io.set_keepalive(self.tcp_ka).is_err() {
error!("Can not set socket keep-alive option");
}
self.handler.handle(Rc::clone(&self.settings), &self.hnd, msg);
self.handler
.handle(Rc::clone(&self.settings), &self.hnd, msg);
}
}
/// `StopWorker` message handler
impl<H> Handler<StopWorker> for Worker<H>
where H: HttpHandler + 'static,
where
H: HttpHandler + 'static,
{
type Result = Response<bool, ()>;
@ -148,17 +157,16 @@ impl<H> Handler<StopWorker> for Worker<H>
#[derive(Clone)]
pub(crate) enum StreamHandlerType {
Normal,
#[cfg(feature="tls")]
#[cfg(feature = "tls")]
Tls(TlsAcceptor),
#[cfg(feature="alpn")]
#[cfg(feature = "alpn")]
Alpn(SslAcceptor),
}
impl StreamHandlerType {
fn handle<H: HttpHandler>(&mut self,
h: Rc<WorkerSettings<H>>,
hnd: &Handle, msg: Conn<net::TcpStream>) {
fn handle<H: HttpHandler>(
&mut self, h: Rc<WorkerSettings<H>>, hnd: &Handle, msg: Conn<net::TcpStream>
) {
match *self {
StreamHandlerType::Normal => {
let _ = msg.io.set_nodelay(true);
@ -167,7 +175,7 @@ impl StreamHandlerType {
hnd.spawn(HttpChannel::new(h, io, msg.peer, msg.http2));
}
#[cfg(feature="tls")]
#[cfg(feature = "tls")]
StreamHandlerType::Tls(ref acceptor) => {
let Conn { io, peer, http2 } = msg;
let _ = io.set_nodelay(true);
@ -177,16 +185,21 @@ impl StreamHandlerType {
hnd.spawn(
TlsAcceptorExt::accept_async(acceptor, io).then(move |res| {
match res {
Ok(io) => Arbiter::handle().spawn(
HttpChannel::new(h, io, peer, http2)),
Err(err) =>
trace!("Error during handling tls connection: {}", err),
Ok(io) => Arbiter::handle().spawn(HttpChannel::new(
h,
io,
peer,
http2,
)),
Err(err) => {
trace!("Error during handling tls connection: {}", err)
}
};
future::result(Ok(()))
})
}),
);
}
#[cfg(feature="alpn")]
#[cfg(feature = "alpn")]
StreamHandlerType::Alpn(ref acceptor) => {
let Conn { io, peer, .. } = msg;
let _ = io.set_nodelay(true);
@ -197,20 +210,26 @@ impl StreamHandlerType {
SslAcceptorExt::accept_async(acceptor, io).then(move |res| {
match res {
Ok(io) => {
let http2 = if let Some(p) = io.get_ref().ssl().selected_alpn_protocol()
let http2 = if let Some(p) =
io.get_ref().ssl().selected_alpn_protocol()
{
p.len() == 2 && &p == b"h2"
} else {
false
};
Arbiter::handle().spawn(
HttpChannel::new(h, io, peer, http2));
},
Err(err) =>
trace!("Error during handling tls connection: {}", err),
Arbiter::handle().spawn(HttpChannel::new(
h,
io,
peer,
http2,
));
}
Err(err) => {
trace!("Error during handling tls connection: {}", err)
}
};
future::result(Ok(()))
})
}),
);
}
}
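The HTTP/2 upgrade decision above hinges on the ALPN protocol negotiated during the TLS handshake: the connection is only treated as h2 when the selected protocol is exactly the two bytes "h2". A standalone sketch of the same check (the function name is illustrative):
// Sketch only: true when the ALPN-selected protocol, as returned by
// selected_alpn_protocol(), is exactly b"h2".
fn is_h2(selected: Option<&[u8]>) -> bool {
    selected == Some(&b"h2"[..])
}
fn main() {
    assert!(is_h2(Some(&b"h2"[..])));
    assert!(!is_h2(Some(&b"http/1.1"[..])));
    assert!(!is_h2(None));
}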