//! This is code from [Tungstenite project](https://github.com/snapview/tungstenite-rs)

/// Mask/unmask a frame.
///
/// XORs every byte of `buf` in place with the repeating 4-byte `mask`
/// (the WebSocket client-to-server masking scheme, RFC 6455 §5.3).
/// Applying the same mask twice restores the original bytes.
#[inline]
pub fn apply_mask(buf: &mut [u8], mask: [u8; 4]) {
    apply_mask_fast32(buf, mask)
}

/// A safe unoptimized mask application.
///
/// Byte-at-a-time reference implementation: byte `i` is XORed with
/// `mask[i % 4]`. Used for the unaligned head/tail in the fast path
/// and as the ground truth in tests.
#[inline]
fn apply_mask_fallback(buf: &mut [u8], mask: [u8; 4]) {
    for (i, byte) in buf.iter_mut().enumerate() {
        *byte ^= mask[i & 3];
    }
}

/// Faster version of `apply_mask()` which operates on 4-byte blocks.
#[inline]
pub fn apply_mask_fast32(buf: &mut [u8], mask: [u8; 4]) {
    let mask_u32 = u32::from_ne_bytes(mask);

    // SAFETY:
    //
    // buf is a valid slice borrowed mutably from bytes::BytesMut.
    //
    // The unaligned prefix and suffix are masked/unmasked per byte.
    // The properly aligned middle slice goes into the fast path and
    // operates on 4-byte blocks. Reinterpreting initialized `u8`s as
    // `u32` is always sound.
    let (mut prefix, words, mut suffix) = unsafe { buf.align_to_mut::<u32>() };
    apply_mask_fallback(&mut prefix, mask);

    // If the prefix consumed 1-3 bytes of the mask cycle, rotate the
    // word-sized mask so the fast path resumes at the right phase.
    // Rotation direction depends on which end of the u32 holds byte 0.
    let head = prefix.len() & 3;
    let mask_u32 = if head > 0 {
        if cfg!(target_endian = "big") {
            mask_u32.rotate_left(8 * head as u32)
        } else {
            mask_u32.rotate_right(8 * head as u32)
        }
    } else {
        mask_u32
    };

    for word in words.iter_mut() {
        *word ^= mask_u32;
    }

    // The suffix continues the cycle where the word pass left off, so
    // it uses the rotated mask's native-endian byte representation.
    apply_mask_fallback(&mut suffix, mask_u32.to_ne_bytes());
}

#[cfg(test)]
mod tests {
    use super::*;

    // legacy test from old apply mask test. kept for now for back compat test.
    // TODO: remove it and favor the other test.
    #[test]
    fn test_apply_mask_legacy() {
        let mask = [0x6d, 0xb6, 0xb2, 0x80];
        let unmasked = vec![
            0xf3, 0x00, 0x01, 0x02, 0x03, 0x80, 0x81, 0x82, 0xff, 0xfe, 0x00, 0x17, 0x74, 0xf9,
            0x12, 0x03,
        ];

        // Check masking with proper alignment.
        {
            let mut masked = unmasked.clone();
            apply_mask_fallback(&mut masked, mask);

            let mut masked_fast = unmasked.clone();
            apply_mask(&mut masked_fast, mask);

            assert_eq!(masked, masked_fast);
        }

        // Check masking without alignment.
        {
            let mut masked = unmasked.clone();
            apply_mask_fallback(&mut masked[1..], mask);

            let mut masked_fast = unmasked;
            apply_mask(&mut masked_fast[1..], mask);

            assert_eq!(masked, masked_fast);
        }
    }

    #[test]
    fn test_apply_mask() {
        let mask = [0x6d, 0xb6, 0xb2, 0x80];
        let unmasked = vec![
            0xf3, 0x00, 0x01, 0x02, 0x03, 0x80, 0x81, 0x82, 0xff, 0xfe, 0x00, 0x17, 0x74, 0xf9,
            0x12, 0x03,
        ];

        for data_len in 0..=unmasked.len() {
            let unmasked = &unmasked[0..data_len];

            // Check masking with different alignment.
            for off in 0..=3 {
                if unmasked.len() < off {
                    continue;
                }
                let mut masked = unmasked.to_vec();
                apply_mask_fallback(&mut masked[off..], mask);

                let mut masked_fast = unmasked.to_vec();
                apply_mask_fast32(&mut masked_fast[off..], mask);

                assert_eq!(masked, masked_fast);
            }
        }
    }
}