add check for usize cast

Author: Nikolay Kim, 2018-07-06 07:46:47 +06:00
Parent: 080f232a0f
Commit: 7d96b92aa3
2 changed files with 58 additions and 54 deletions


@@ -94,9 +94,12 @@ impl Frame {
                 Async::Ready(None) => return Ok(Async::Ready(None)),
                 Async::NotReady => return Ok(Async::NotReady),
             };
-            let len = NetworkEndian::read_uint(&buf[idx..], 8) as usize;
+            let len = NetworkEndian::read_uint(&buf[idx..], 8);
+            if len > max_size as u64 {
+                return Err(ProtocolError::Overflow);
+            }
             idx += 8;
-            len
+            len as usize
         } else {
             len as usize
         };
@@ -165,9 +168,12 @@ impl Frame {
             if chunk_len < 10 {
                 return Ok(Async::NotReady);
             }
-            let len = NetworkEndian::read_uint(&chunk[idx..], 8) as usize;
+            let len = NetworkEndian::read_uint(&chunk[idx..], 8);
+            if len > max_size as u64 {
+                return Err(ProtocolError::Overflow);
+            }
             idx += 8;
-            len
+            len as usize
         } else {
             len as usize
         };
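
Both hunks above make the same fix: the 8-byte payload length is kept as a u64 and checked against max_size before it is narrowed to usize, so an oversized length can no longer wrap on 32-bit targets. A minimal standalone sketch of the pattern (the names here are illustrative, not the actual actix-web items):

    // Sketch: validate a 64-bit wire length before narrowing it to usize.
    // Comparing while still in u64 is lossless because max_size fits in usize.
    fn checked_payload_len(len: u64, max_size: usize) -> Result<usize, &'static str> {
        if len > max_size as u64 {
            return Err("payload exceeds max_size");
        }
        // Here len <= max_size <= usize::MAX, so the cast cannot truncate.
        Ok(len as usize)
    }
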
@@ -255,6 +261,8 @@ impl Frame {
 
         // unmask
         if let Some(mask) = mask {
+            // Unsafe: request body stream is owned by WsStream. only one ref to
+            // bytes exists. Bytes object get freezed in continuous non-overlapping blocks
             let p: &mut [u8] = unsafe {
                 let ptr: &[u8] = &data;
                 &mut *(ptr as *const _ as *mut _)
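
The comment added above records why the `&[u8]` to `&mut [u8]` cast is considered sound here: the stream owns the only reference to the bytes. For contrast, a safe sketch that sidesteps the cast by unmasking into an owned buffer (illustrative only, not what the crate does; it also takes the key as four wire-order bytes rather than the u32 the crate passes around):

    // Sketch: unmask into an owned Vec instead of mutating through a pointer cast.
    fn unmask_to_vec(masked: &[u8], key: [u8; 4]) -> Vec<u8> {
        let mut out = masked.to_vec();
        for (i, byte) in out.iter_mut().enumerate() {
            // RFC 6455: output byte i = input byte i XOR key byte (i mod 4).
            *byte ^= key[i % 4];
        }
        out
    }
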
@@ -272,7 +280,7 @@ impl Frame {
     /// Parse the payload of a close frame.
     pub fn parse_close_payload(payload: &Binary) -> Option<CloseReason> {
         if payload.len() >= 2 {
-            let raw_code = NetworkEndian::read_uint(payload.as_ref(), 2) as u16;
+            let raw_code = NetworkEndian::read_u16(payload.as_ref());
             let code = CloseCode::from(raw_code);
             let description = if payload.len() > 2 {
                 Some(String::from_utf8_lossy(&payload.as_ref()[2..]).into())
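
`read_u16` reads exactly the two big-endian bytes of the close code, which the old `read_uint(.., 2) as u16` did in a roundabout way. A self-contained sketch of the same parsing shape, using plain types in place of `Binary` and `CloseReason`:

    use byteorder::{ByteOrder, NetworkEndian};

    // Sketch: a close payload is a 2-byte big-endian status code, optionally
    // followed by a UTF-8 reason string.
    fn parse_close(payload: &[u8]) -> Option<(u16, Option<String>)> {
        if payload.len() < 2 {
            return None;
        }
        let code = NetworkEndian::read_u16(payload);
        let reason = if payload.len() > 2 {
            Some(String::from_utf8_lossy(&payload[2..]).into_owned())
        } else {
            None
        };
        Some((code, reason))
    }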


@@ -4,75 +4,71 @@ use std::cmp::min;
 use std::mem::uninitialized;
 use std::ptr::copy_nonoverlapping;
 
 /// Mask/unmask a frame.
-#[inline]
-pub fn apply_mask(buf: &mut [u8], mask: u32) {
-    unsafe { apply_mask_fast32(buf, mask) }
-}
-
-/// Faster version of `apply_mask()` which operates on 8-byte blocks.
 ///
 /// unsafe because uses pointer math and bit operations for performance
 #[inline]
 #[cfg_attr(feature = "cargo-clippy", allow(cast_lossless))]
-unsafe fn apply_mask_fast32(buf: &mut [u8], mask_u32: u32) {
-    let mut ptr = buf.as_mut_ptr();
-    let mut len = buf.len();
-
-    // Possible first unaligned block.
-    let head = min(len, (8 - (ptr as usize & 0x7)) & 0x3);
-    let mask_u32 = if head > 0 {
-        let n = if head > 4 { head - 4 } else { head };
-
-        let mask_u32 = if n > 0 {
-            xor_mem(ptr, mask_u32, n);
-            ptr = ptr.offset(head as isize);
-            len -= n;
-            if cfg!(target_endian = "big") {
-                mask_u32.rotate_left(8 * n as u32)
-            } else {
-                mask_u32.rotate_right(8 * n as u32)
-            }
-        } else {
-            mask_u32
-        };
-
-        if head > 4 {
-            *(ptr as *mut u32) ^= mask_u32;
-            ptr = ptr.offset(4);
-            len -= 4;
-        }
-        mask_u32
-    } else {
-        mask_u32
-    };
-
-    if len > 0 {
-        debug_assert_eq!(ptr as usize % 4, 0);
-    }
-
-    // Properly aligned middle of the data.
-    if len >= 8 {
-        let mut mask_u64 = mask_u32 as u64;
-        mask_u64 = mask_u64 << 32 | mask_u32 as u64;
-
-        while len >= 8 {
-            *(ptr as *mut u64) ^= mask_u64;
-            ptr = ptr.offset(8);
-            len -= 8;
-        }
-    }
-
-    while len >= 4 {
-        *(ptr as *mut u32) ^= mask_u32;
-        ptr = ptr.offset(4);
-        len -= 4;
-    }
-
-    // Possible last block.
-    if len > 0 {
-        xor_mem(ptr, mask_u32, len);
-    }
-}
+pub(crate) fn apply_mask(buf: &mut [u8], mask_u32: u32) {
+    unsafe {
+        let mut ptr = buf.as_mut_ptr();
+        let mut len = buf.len();
+
+        // Possible first unaligned block.
+        let head = min(len, (8 - (ptr as usize & 0x7)) & 0x3);
+        let mask_u32 = if head > 0 {
+            let n = if head > 4 { head - 4 } else { head };
+
+            let mask_u32 = if n > 0 {
+                xor_mem(ptr, mask_u32, n);
+                ptr = ptr.offset(head as isize);
+                len -= n;
+                if cfg!(target_endian = "big") {
+                    mask_u32.rotate_left(8 * n as u32)
+                } else {
+                    mask_u32.rotate_right(8 * n as u32)
+                }
+            } else {
+                mask_u32
+            };
+
+            if head > 4 {
+                *(ptr as *mut u32) ^= mask_u32;
+                ptr = ptr.offset(4);
+                len -= 4;
+            }
+            mask_u32
+        } else {
+            mask_u32
+        };
+
+        if len > 0 {
+            debug_assert_eq!(ptr as usize % 4, 0);
+        }
+
+        // Properly aligned middle of the data.
+        if len >= 8 {
+            let mut mask_u64 = mask_u32 as u64;
+            mask_u64 = mask_u64 << 32 | mask_u32 as u64;
+
+            while len >= 8 {
+                *(ptr as *mut u64) ^= mask_u64;
+                ptr = ptr.offset(8);
+                len -= 8;
+            }
+        }
+
+        while len >= 4 {
+            *(ptr as *mut u32) ^= mask_u32;
+            ptr = ptr.offset(4);
+            len -= 4;
+        }
+
+        // Possible last block.
+        if len > 0 {
+            xor_mem(ptr, mask_u32, len);
+        }
+    }
+}
 
 #[inline]
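
The merged function keeps the old `apply_mask_fast32` structure: an unaligned head handled byte-wise with the key rotated to stay in phase, an aligned middle XORed in 8- and 4-byte blocks, and a byte-wise tail. As a reference for what it computes, a plain byte-at-a-time version (the native-endian mapping of the u32 key to key bytes is an assumption that matches how the word-sized XORs treat memory, not something stated in the diff):

    // Sketch: byte-wise masking, equivalent in effect to the block-wise XORs.
    fn apply_mask_reference(buf: &mut [u8], mask_u32: u32) {
        let key = mask_u32.to_ne_bytes();
        for (i, byte) in buf.iter_mut().enumerate() {
            *byte ^= key[i % 4];
        }
    }

A quick sanity check is to run this and the optimized routine over the same random buffer and assert the outputs agree.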