// actix-web/src/client/writer.rs
#![allow(dead_code)]
use std::io;
use std::fmt::Write;

use bytes::{BytesMut, BufMut};
use futures::{Async, Poll};
use tokio_io::AsyncWrite;
use http::{Version, HttpTryFrom};
use http::header::{HeaderValue, CONNECTION, CONTENT_ENCODING, CONTENT_LENGTH, TRANSFER_ENCODING};
use flate2::Compression;
use flate2::write::{GzEncoder, DeflateEncoder};
use brotli2::write::BrotliEncoder;

use body::{Body, Binary};
use headers::ContentEncoding;
use server::WriterState;
use server::shared::SharedBytes;
use server::encoding::{ContentEncoder, TransferEncoding};
use client::ClientRequest;

const LOW_WATERMARK: usize = 1024;
const HIGH_WATERMARK: usize = 8 * LOW_WATERMARK;
const AVERAGE_HEADER_SIZE: usize = 30;
bitflags! {
    struct Flags: u8 {
        const STARTED = 0b0000_0001;
        const UPGRADE = 0b0000_0010;
        const KEEPALIVE = 0b0000_0100;
        const DISCONNECTED = 0b0000_1000;
    }
}
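/// Writes a client request into the shared output buffer: the request head is
/// rendered directly, while the body goes through the configured
/// content/transfer encoder. The low/high watermarks control back-pressure.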
pub(crate) struct HttpClientWriter {
    flags: Flags,
    written: u64,
    headers_size: u32,
    buffer: SharedBytes,
    encoder: ContentEncoder,
    low: usize,
    high: usize,
}
impl HttpClientWriter {
    pub fn new(buf: SharedBytes) -> HttpClientWriter {
        let encoder = ContentEncoder::Identity(TransferEncoding::eof(buf.clone()));
        HttpClientWriter {
            flags: Flags::empty(),
            written: 0,
            headers_size: 0,
            buffer: buf,
            encoder: encoder,
            low: LOW_WATERMARK,
            high: HIGH_WATERMARK,
        }
    }
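    /// Discard any buffered output once the connection is gone.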
    pub fn disconnected(&mut self) {
        self.buffer.take();
    }

    pub fn keepalive(&self) -> bool {
        self.flags.contains(Flags::KEEPALIVE) && !self.flags.contains(Flags::UPGRADE)
    }

    /// Set write buffer capacity
    pub fn set_buffer_capacity(&mut self, low_watermark: usize, high_watermark: usize) {
        self.low = low_watermark;
        self.high = high_watermark;
    }
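    /// Drain the shared buffer into `stream`; on `WouldBlock`, ask the caller
    /// to pause if the buffer is still above the high watermark.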
    fn write_to_stream<T: AsyncWrite>(&mut self, stream: &mut T) -> io::Result<WriterState> {
        while !self.buffer.is_empty() {
            match stream.write(self.buffer.as_ref()) {
                Ok(0) => {
                    self.disconnected();
                    return Ok(WriterState::Done);
                },
                Ok(n) => {
                    let _ = self.buffer.split_to(n);
                },
                Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
                    if self.buffer.len() > self.high {
                        return Ok(WriterState::Pause)
                    } else {
                        return Ok(WriterState::Done)
                    }
                }
                Err(err) => return Err(err),
            }
        }
        Ok(WriterState::Done)
    }
}
impl HttpClientWriter {
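    /// Render the request line and headers into the buffer and, for binary
    /// bodies, write the body through the content encoder immediately.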
    pub fn start(&mut self, msg: &mut ClientRequest) -> io::Result<()> {
        // prepare task
        self.flags.insert(Flags::STARTED);
        self.encoder = content_encoder(self.buffer.clone(), msg);

        // render message
        {
            let buffer = self.buffer.get_mut();

            if let Body::Binary(ref bytes) = *msg.body() {
                buffer.reserve(256 + msg.headers().len() * AVERAGE_HEADER_SIZE + bytes.len());
            } else {
                buffer.reserve(256 + msg.headers().len() * AVERAGE_HEADER_SIZE);
            }

            if msg.upgrade() {
                self.flags.insert(Flags::UPGRADE);
            }

            // request line
            let _ = write!(buffer, "{} {} {:?}\r\n",
                           msg.method(), msg.uri().path(), msg.version());

            // write headers
            for (key, value) in msg.headers() {
                let v = value.as_ref();
                let k = key.as_str().as_bytes();
                buffer.reserve(k.len() + v.len() + 4);
                buffer.put_slice(k);
                buffer.put_slice(b": ");
                buffer.put_slice(v);
                buffer.put_slice(b"\r\n");
            }

            // using helpers::date is quite a lot faster
            //if !msg.headers.contains_key(DATE) {
            //    helpers::date(&mut buffer);
            //} else {
            // msg eof
            buffer.extend_from_slice(b"\r\n");
            //}
            self.headers_size = buffer.len() as u32;

            if msg.body().is_binary() {
                if let Body::Binary(bytes) = msg.replace_body(Body::Empty) {
                    self.written += bytes.len() as u64;
                    self.encoder.write(bytes)?;
                }
            }
        }

        Ok(())
    }
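    /// Write a body chunk. Chunks are dropped once the writer is marked
    /// disconnected; upgraded connections bypass the encoder and append
    /// directly to the buffer. Returns `Pause` when the buffer grows past
    /// the high watermark.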
    pub fn write(&mut self, payload: Binary) -> io::Result<WriterState> {
        self.written += payload.len() as u64;
        if !self.flags.contains(Flags::DISCONNECTED) {
            if self.flags.contains(Flags::UPGRADE) {
                self.buffer.extend(payload);
            } else {
                self.encoder.write(payload)?;
            }
        }

        if self.buffer.len() > self.high {
            Ok(WriterState::Pause)
        } else {
            Ok(WriterState::Done)
        }
    }
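    /// Finish the body: it is an error if the encoder has not reached eof
    /// after the last payload item.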
    pub fn write_eof(&mut self) -> io::Result<()> {
        self.encoder.write_eof()?;

        if !self.encoder.is_eof() {
            Err(io::Error::new(io::ErrorKind::Other,
                               "Last payload item, but eof is not reached"))
        } else {
            Ok(())
        }
    }
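    /// Flush buffered data to the stream; once everything is written, shut
    /// the stream down if `shutdown` is requested.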
    #[inline]
    pub fn poll_completed<T: AsyncWrite>(&mut self, stream: &mut T, shutdown: bool)
                                         -> Poll<(), io::Error>
    {
        match self.write_to_stream(stream) {
            Ok(WriterState::Done) => {
                if shutdown {
                    stream.shutdown()
                } else {
                    Ok(Async::Ready(()))
                }
            },
            Ok(WriterState::Pause) => Ok(Async::NotReady),
            Err(err) => Err(err)
        }
    }
}
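/// Select the content encoder and transfer encoding for a request based on
/// its body, the requested content encoding and the HTTP version. Binary
/// bodies are compressed up front and get a Content-Length header; streaming
/// bodies are delegated to `streaming_encoding()` unless the connection is
/// being upgraded.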
fn content_encoder(buf: SharedBytes, req: &mut ClientRequest) -> ContentEncoder {
    let version = req.version();
    let mut body = req.replace_body(Body::Empty);
    let mut encoding = req.content_encoding();

    let transfer = match body {
        Body::Empty => {
            req.headers_mut().remove(CONTENT_LENGTH);
            TransferEncoding::length(0, buf)
        },
        Body::Binary(ref mut bytes) => {
            if encoding.is_compression() {
                let tmp = SharedBytes::default();
                let transfer = TransferEncoding::eof(tmp.clone());
                let mut enc = match encoding {
                    ContentEncoding::Deflate => ContentEncoder::Deflate(
                        DeflateEncoder::new(transfer, Compression::default())),
                    ContentEncoding::Gzip => ContentEncoder::Gzip(
                        GzEncoder::new(transfer, Compression::default())),
                    ContentEncoding::Br => ContentEncoder::Br(
                        BrotliEncoder::new(transfer, 5)),
                    ContentEncoding::Identity => ContentEncoder::Identity(transfer),
                    ContentEncoding::Auto => unreachable!()
                };
                // TODO return error!
                let _ = enc.write(bytes.clone());
                let _ = enc.write_eof();
                *bytes = Binary::from(tmp.take());

                req.headers_mut().insert(
                    CONTENT_ENCODING, HeaderValue::from_static(encoding.as_str()));
                encoding = ContentEncoding::Identity;
            }
            let mut b = BytesMut::new();
            let _ = write!(b, "{}", bytes.len());
            req.headers_mut().insert(
                CONTENT_LENGTH, HeaderValue::try_from(b.freeze()).unwrap());
            TransferEncoding::eof(buf)
        },
        Body::Streaming(_) | Body::Actor(_) => {
            if req.upgrade() {
                if version == Version::HTTP_2 {
                    error!("Connection upgrade is forbidden for HTTP/2");
                } else {
                    req.headers_mut().insert(CONNECTION, HeaderValue::from_static("upgrade"));
                }
                if encoding != ContentEncoding::Identity {
                    encoding = ContentEncoding::Identity;
                    req.headers_mut().remove(CONTENT_ENCODING);
                }
                TransferEncoding::eof(buf)
            } else {
                streaming_encoding(buf, version, req)
            }
        }
    };

    if encoding.is_compression() {
        req.headers_mut().insert(
            CONTENT_ENCODING, HeaderValue::from_static(encoding.as_str()));
    }

    req.replace_body(body);
    match encoding {
        ContentEncoding::Deflate => ContentEncoder::Deflate(
            DeflateEncoder::new(transfer, Compression::default())),
        ContentEncoding::Gzip => ContentEncoder::Gzip(
            GzEncoder::new(transfer, Compression::default())),
        ContentEncoding::Br => ContentEncoder::Br(
            BrotliEncoder::new(transfer, 5)),
        ContentEncoding::Identity | ContentEncoding::Auto => ContentEncoder::Identity(transfer),
    }
}
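/// Pick a transfer encoding for a streaming body: chunked when explicitly
/// requested (not on HTTP/2), length-delimited when a valid Content-Length
/// header is present, otherwise chunked on HTTP/1.1 and eof-delimited on
/// other versions.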
fn streaming_encoding(buf: SharedBytes, version: Version, req: &mut ClientRequest)
                      -> TransferEncoding {
    if req.chunked() {
        // Enable transfer encoding
        req.headers_mut().remove(CONTENT_LENGTH);
        if version == Version::HTTP_2 {
            req.headers_mut().remove(TRANSFER_ENCODING);
            TransferEncoding::eof(buf)
        } else {
            req.headers_mut().insert(
                TRANSFER_ENCODING, HeaderValue::from_static("chunked"));
            TransferEncoding::chunked(buf)
        }
    } else {
        // if Content-Length is specified, then use it as length hint
        let (len, chunked) =
            if let Some(len) = req.headers().get(CONTENT_LENGTH) {
                // Content-Length
                if let Ok(s) = len.to_str() {
                    if let Ok(len) = s.parse::<u64>() {
                        (Some(len), false)
                    } else {
                        error!("illegal Content-Length: {:?}", len);
                        (None, false)
                    }
                } else {
                    error!("illegal Content-Length: {:?}", len);
                    (None, false)
                }
            } else {
                (None, true)
            };

        if !chunked {
            if let Some(len) = len {
                TransferEncoding::length(len, buf)
            } else {
                TransferEncoding::eof(buf)
            }
        } else {
            // Enable transfer encoding
            match version {
                Version::HTTP_11 => {
                    req.headers_mut().insert(
                        TRANSFER_ENCODING, HeaderValue::from_static("chunked"));
                    TransferEncoding::chunked(buf)
                },
                _ => {
                    req.headers_mut().remove(TRANSFER_ENCODING);
                    TransferEncoding::eof(buf)
                }
            }
        }
    }
}