//! This is code from [Tungstenite project](https://github.com/snapview/tungstenite-rs)

/// Mask/unmask a frame.
#[inline]
pub fn apply_mask(buf: &mut [u8], mask: [u8; 4]) {
    apply_mask_fast32(buf, mask)
}
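
// Usage sketch (ours, not from the upstream source): XOR masking is its own
// inverse, so a client masks an outgoing frame and the peer unmasks it with
// the exact same call.
//
//     let mut payload = *b"frame payload";
//     apply_mask(&mut payload, [0x12, 0x34, 0x56, 0x78]); // mask
//     apply_mask(&mut payload, [0x12, 0x34, 0x56, 0x78]); // unmask
//     assert_eq!(&payload, b"frame payload");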

/// A safe unoptimized mask application.
#[inline]
fn apply_mask_fallback(buf: &mut [u8], mask: [u8; 4]) {
    for (i, byte) in buf.iter_mut().enumerate() {
        *byte ^= mask[i & 3];
    }
}

/// Faster version of `apply_mask()` which operates on 4-byte blocks.
#[inline]
pub fn apply_mask_fast32(buf: &mut [u8], mask: [u8; 4]) {
    let mask_u32 = u32::from_ne_bytes(mask);

    // SAFETY:
    //
    // buf is a valid slice borrowed mutably from bytes::BytesMut.
    //
    // The unaligned prefix and suffix are masked/unmasked one byte at a time,
    // while the properly aligned middle slice takes the fast path and is
    // masked in 4-byte blocks.
    let (mut prefix, words, mut suffix) = unsafe { buf.align_to_mut::<u32>() };

    apply_mask_fallback(&mut prefix, mask);

    // If the prefix was non-empty, rotate the mask so it stays in phase with
    // the first byte of the aligned words.
    let head = prefix.len() & 3;
    let mask_u32 = if head > 0 {
        if cfg!(target_endian = "big") {
            mask_u32.rotate_left(8 * head as u32)
        } else {
            mask_u32.rotate_right(8 * head as u32)
        }
    } else {
        mask_u32
    };

    for word in words.iter_mut() {
        *word ^= mask_u32;
    }

    apply_mask_fallback(&mut suffix, mask_u32.to_ne_bytes());
}
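
// Worked example (ours, for illustration): with mask [m0, m1, m2, m3] and a
// 1-byte unaligned prefix, the prefix consumes m0, so the first aligned word
// must be XORed with the byte pattern [m1, m2, m3, m0]. On little-endian,
// `u32::from_ne_bytes([m0, m1, m2, m3]).rotate_right(8)` is exactly that
// pattern; big-endian needs `rotate_left(8)` instead.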

#[cfg(test)]
mod tests {
    use super::*;

    // Legacy test kept from the old apply-mask implementation for
    // backwards-compatibility coverage.
    // TODO: remove it in favor of the test below.
    #[test]
    fn test_apply_mask_legacy() {
        let mask = [0x6d, 0xb6, 0xb2, 0x80];
        let unmasked = vec![
            0xf3, 0x00, 0x01, 0x02, 0x03, 0x80, 0x81, 0x82, 0xff, 0xfe, 0x00, 0x17,
            0x74, 0xf9, 0x12, 0x03,
        ];

        // Check masking with proper alignment.
        {
            let mut masked = unmasked.clone();
            apply_mask_fallback(&mut masked, mask);

            let mut masked_fast = unmasked.clone();
            apply_mask(&mut masked_fast, mask);

            assert_eq!(masked, masked_fast);
        }

        // Check masking without alignment.
        {
            let mut masked = unmasked.clone();
            apply_mask_fallback(&mut masked[1..], mask);

            let mut masked_fast = unmasked;
            apply_mask(&mut masked_fast[1..], mask);

            assert_eq!(masked, masked_fast);
        }
    }

    #[test]
    fn test_apply_mask() {
        let mask = [0x6d, 0xb6, 0xb2, 0x80];
        let unmasked = vec![
            0xf3, 0x00, 0x01, 0x02, 0x03, 0x80, 0x81, 0x82, 0xff, 0xfe, 0x00, 0x17,
            0x74, 0xf9, 0x12, 0x03,
        ];

        for data_len in 0..=unmasked.len() {
            let unmasked = &unmasked[0..data_len];

            // Check masking with different alignment.
            for off in 0..=3 {
                if unmasked.len() < off {
                    continue;
                }

                let mut masked = unmasked.to_vec();
                apply_mask_fallback(&mut masked[off..], mask);

                let mut masked_fast = unmasked.to_vec();
                apply_mask_fast32(&mut masked_fast[off..], mask);

                assert_eq!(masked, masked_fast);
            }
        }
    }
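
    // Illustrative addition (ours, not in the upstream Tungstenite code):
    // masking is a plain XOR with a repeating 4-byte key, so applying the
    // same mask twice must round-trip the payload.
    #[test]
    fn test_apply_mask_roundtrip() {
        let mask = [0x6d, 0xb6, 0xb2, 0x80];
        let original = vec![0xf3, 0x00, 0x01, 0x02, 0x03, 0x80, 0x81];

        let mut buf = original.clone();
        apply_mask(&mut buf, mask);
        assert_ne!(buf, original); // masked bytes differ for this payload
        apply_mask(&mut buf, mask);
        assert_eq!(buf, original);
    }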
}