diff --git a/chacha20/Cargo.toml b/chacha20/Cargo.toml
index dc451c72..5cd2bf91 100644
--- a/chacha20/Cargo.toml
+++ b/chacha20/Cargo.toml
@@ -39,9 +39,14 @@ legacy = ["cipher"]
 rng = ["dep:rand_core"]
 xchacha = ["cipher"]
 
-[package.metadata.docs.rs]
-all-features = true
-rustdoc-args = ["--cfg", "docsrs"]
+[lints.rust]
+missing_copy_implementations = "warn"
+missing_debug_implementations = "warn"
+missing_docs = "warn"
+trivial_casts = "warn"
+trivial_numeric_casts = "warn"
+unused_lifetimes = "warn"
+unused_qualifications = "warn"
 
 [lints.rust.unexpected_cfgs]
 level = "warn"
@@ -51,4 +56,35 @@ check-cfg = [
 ]
 
 [lints.clippy]
+borrow_as_ptr = "warn"
+cast_lossless = "warn"
+cast_possible_truncation = "warn"
+cast_possible_wrap = "warn"
+cast_precision_loss = "warn"
+cast_sign_loss = "warn"
+checked_conversions = "warn"
+from_iter_instead_of_collect = "warn"
+implicit_saturating_sub = "warn"
+manual_assert = "warn"
+map_unwrap_or = "warn"
+missing_errors_doc = "warn"
+missing_panics_doc = "warn"
+mod_module_files = "warn"
+must_use_candidate = "warn"
 needless_range_loop = "allow"
+ptr_as_ptr = "warn"
+redundant_closure_for_method_calls = "warn"
+ref_as_ptr = "warn"
+return_self_not_must_use = "warn"
+semicolon_if_nothing_returned = "warn"
+std_instead_of_alloc = "warn"
+std_instead_of_core = "warn"
+trivially_copy_pass_by_ref = "warn"
+undocumented_unsafe_blocks = "warn"
+unnecessary_safety_comment = "warn"
+unwrap_in_result = "warn"
+unwrap_used = "warn"
+
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
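The new `[lints]` entries are mostly pedantic cast lints. A minimal sketch (mine, not part of the diff) of what `clippy::cast_possible_truncation` flags, using the counter-splitting pattern this crate relies on; the `reason = "..."` syntax on lint attributes is stable since Rust 1.81:

```rust
/// Split a 64-bit block counter into little-endian 32-bit halves.
fn split_counter(ctr: u64) -> [u32; 2] {
    // `ctr as u32` silently drops the high 32 bits, so the lint fires
    // unless the cast is explicitly allowed with a recorded reason.
    #[allow(clippy::cast_possible_truncation, reason = "low half wanted")]
    let lo = ctr as u32;
    let hi = (ctr >> 32) as u32; // the shift leaves at most 32 significant bits
    [lo, hi]
}

fn main() {
    assert_eq!(split_counter(0x1_0000_0002), [2, 1]);
}
```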
diff --git a/chacha20/src/backends/avx2.rs b/chacha20/src/backends/avx2.rs
index 63b4fa44..2d7c3f55 100644
--- a/chacha20/src/backends/avx2.rs
+++ b/chacha20/src/backends/avx2.rs
@@ -1,4 +1,11 @@
-#![allow(unsafe_op_in_unsafe_fn)]
+//! AVX2 backend.
+
+#![allow(unsafe_op_in_unsafe_fn, reason = "needs triage")]
+#![allow(clippy::cast_possible_truncation, reason = "needs triage")]
+#![allow(clippy::cast_possible_wrap, reason = "needs triage")]
+#![allow(clippy::cast_sign_loss, reason = "needs triage")]
+#![allow(clippy::undocumented_unsafe_blocks, reason = "TODO")]
+
 use crate::{Rounds, Variant};
 use core::marker::PhantomData;
 
@@ -6,12 +13,12 @@ use core::marker::PhantomData;
 use crate::ChaChaCore;
 
 #[cfg(feature = "cipher")]
-use crate::{chacha::Block, STATE_WORDS};
+use crate::{STATE_WORDS, chacha::Block};
 #[cfg(feature = "cipher")]
 use cipher::{
-    consts::{U4, U64},
     BlockSizeUser, ParBlocks, ParBlocksSizeUser, StreamCipherBackend, StreamCipherClosure,
+    consts::{U4, U64},
 };
 
 #[cfg(target_arch = "x86")]
 use core::arch::x86::*;
@@ -34,7 +41,7 @@ where
     F: StreamCipherClosure<BlockSize = U64>,
     V: Variant,
 {
-    let state_ptr = state.as_ptr() as *const __m128i;
+    let state_ptr = state.as_ptr().cast::<__m128i>();
     let v = [
         _mm256_broadcastsi128_si256(_mm_loadu_si128(state_ptr.add(0))),
         _mm256_broadcastsi128_si256(_mm_loadu_si128(state_ptr.add(1))),
@@ -44,7 +51,7 @@ where
     c = match size_of::<V::Counter>() {
         4 => _mm256_add_epi32(c, _mm256_set_epi32(0, 0, 0, 1, 0, 0, 0, 0)),
         8 => _mm256_add_epi64(c, _mm256_set_epi64x(0, 1, 0, 0)),
-        _ => unreachable!()
+        _ => unreachable!(),
    };
     let mut ctr = [c; N];
     for i in 0..N {
@@ -65,9 +72,9 @@ where
     state[12] = _mm256_extract_epi32(backend.ctr[0], 0) as u32;
 
     match size_of::<V::Counter>() {
-        4 => {},
+        4 => {}
         8 => state[13] = _mm256_extract_epi32(backend.ctr[0], 1) as u32,
-        _ => unreachable!()
+        _ => unreachable!(),
     }
 }
@@ -79,7 +86,7 @@ where
     R: Rounds,
     V: Variant,
 {
-    let state_ptr = core.state.as_ptr() as *const __m128i;
+    let state_ptr = core.state.as_ptr().cast::<__m128i>();
     let v = [
         _mm256_broadcastsi128_si256(_mm_loadu_si128(state_ptr.add(0))),
         _mm256_broadcastsi128_si256(_mm_loadu_si128(state_ptr.add(1))),
@@ -130,13 +137,13 @@ impl<R: Rounds, V: Variant> StreamCipherBackend for Backend<R, V> {
             *c = match size_of::<V::Counter>() {
                 4 => _mm256_add_epi32(*c, _mm256_set_epi32(0, 0, 0, 1, 0, 0, 0, 1)),
                 8 => _mm256_add_epi64(*c, _mm256_set_epi64x(0, 1, 0, 1)),
-                _ => unreachable!()
+                _ => unreachable!(),
             };
         }
 
         let res0: [__m128i; 8] = core::mem::transmute(res[0]);
 
-        let block_ptr = block.as_mut_ptr() as *mut __m128i;
+        let block_ptr = block.as_mut_ptr().cast::<__m128i>();
         for i in 0..4 {
             _mm_storeu_si128(block_ptr.add(i), res0[2 * i]);
         }
@@ -152,12 +159,14 @@ impl<R: Rounds, V: Variant> StreamCipherBackend for Backend<R, V> {
         for c in self.ctr.iter_mut() {
             *c = match size_of::<V::Counter>() {
                 4 => _mm256_add_epi32(*c, _mm256_set_epi32(0, 0, 0, pb, 0, 0, 0, pb)),
-                8 => _mm256_add_epi64(*c, _mm256_set_epi64x(0, pb as i64, 0, pb as i64)),
-                _ => unreachable!()
+                8 => {
+                    _mm256_add_epi64(*c, _mm256_set_epi64x(0, i64::from(pb), 0, i64::from(pb)))
+                }
+                _ => unreachable!(),
             }
         }
 
-        let mut block_ptr = blocks.as_mut_ptr() as *mut __m128i;
+        let mut block_ptr = blocks.as_mut_ptr().cast::<__m128i>();
         for v in vs {
             let t: [__m128i; 8] = core::mem::transmute(v);
             for i in 0..4 {
@@ -179,10 +188,10 @@ impl<R: Rounds, V: Variant> Backend<R, V> {
         let pb = PAR_BLOCKS as i32;
 
         for c in self.ctr.iter_mut() {
-            *c = _mm256_add_epi64(*c, _mm256_set_epi64x(0, pb as i64, 0, pb as i64));
+            *c = _mm256_add_epi64(*c, _mm256_set_epi64x(0, i64::from(pb), 0, i64::from(pb)));
         }
 
-        let mut block_ptr = blocks.as_mut_ptr() as *mut __m128i;
+        let mut block_ptr = blocks.as_mut_ptr().cast::<__m128i>();
         for v in vs {
             let t: [__m128i; 8] = core::mem::transmute(v);
             for i in 0..4 {
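The repeated `as *const __m128i` → `.cast::<__m128i>()` changes address the `ptr_as_ptr` lint: `pointer::cast` can only change the pointee type, so an accidental const-to-mut slip becomes a compile error. A sketch (mine, not from the diff):

```rust
fn demo(words: &[u32; 16]) -> u8 {
    let p: *const u32 = words.as_ptr();
    let bytes = p.cast::<u8>(); // still `*const`: const-ness is preserved
    let _loose = p as *mut u8; // `as` compiles too, silently dropping const-ness
    // SAFETY: `p` points at 16 initialized u32s, so one byte is readable.
    unsafe { bytes.read() }
}
```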
diff --git a/chacha20/src/backends/neon.rs b/chacha20/src/backends/neon.rs
index 012232ce..8f27963b 100644
--- a/chacha20/src/backends/neon.rs
+++ b/chacha20/src/backends/neon.rs
@@ -1,9 +1,10 @@
-#![allow(unsafe_op_in_unsafe_fn)]
 //! NEON-optimized implementation for aarch64 CPUs.
 //!
 //! Adapted from the Crypto++ `chacha_simd` implementation by Jack Lloyd and
 //! Jeffrey Walton (public domain).
 
+#![allow(unsafe_op_in_unsafe_fn, reason = "needs triage")]
+
 use crate::{Rounds, STATE_WORDS, Variant};
 use core::{arch::aarch64::*, marker::PhantomData};
 
@@ -77,7 +78,7 @@ where
     match size_of::<V::Counter>() {
         4 => state[12] = vgetq_lane_u32(backend.state[3], 0),
         8 => vst1q_u64(
-            state.as_mut_ptr().offset(12) as *mut u64,
+            state.as_mut_ptr().offset(12).cast::<u64>(),
             vreinterpretq_u64_u32(backend.state[3]),
         ),
         _ => unreachable!(),
@@ -98,7 +99,7 @@ where
     backend.write_par_ks_blocks(buffer);
 
     vst1q_u64(
-        core.state.as_mut_ptr().offset(12) as *mut u64,
+        core.state.as_mut_ptr().offset(12).cast::<u64>(),
         vreinterpretq_u64_u32(backend.state[3]),
     );
 }
@@ -127,6 +128,8 @@ impl<R: Rounds, V: Variant> StreamCipherBackend for Backend<R, V> {
         let mut par = ParBlocks::<Self>::default();
         self.gen_par_ks_blocks(&mut par);
         *block = par[0];
+
+        // SAFETY: we have used conditional compilation to ensure NEON is available
         unsafe {
             self.state[3] = add_counter!(state3, vld1q_u32([1, 0, 0, 0].as_ptr()), V);
         }
@@ -134,6 +137,7 @@ impl<R: Rounds, V: Variant> StreamCipherBackend for Backend<R, V> {
 
     #[inline(always)]
     fn gen_par_ks_blocks(&mut self, dest: &mut ParBlocks<Self>) {
+        // SAFETY: we have used conditional compilation to ensure NEON is available
         unsafe {
             let mut blocks = [
                 [self.state[0], self.state[1], self.state[2], self.state[3]],
@@ -176,6 +180,7 @@ impl<R: Rounds, V: Variant> StreamCipherBackend for Backend<R, V> {
             }
             // write blocks to dest
             for state_row in 0..4 {
+                #[allow(clippy::cast_sign_loss, reason = "needs triage")]
                 vst1q_u8(
                     dest[block].as_mut_ptr().offset(state_row << 4),
                     vreinterpretq_u8_u32(blocks[block][state_row as usize]),
@@ -245,7 +250,7 @@ impl<R: Rounds, V: Variant> Backend<R, V> {
             double_quarter_round(&mut blocks);
         }
 
-        let mut dest_ptr = buffer.as_mut_ptr() as *mut u8;
+        let mut dest_ptr = buffer.as_mut_ptr().cast::<u8>();
         for block in 0..4 {
             // add state to block
             for state_row in 0..3 {
@@ -261,6 +266,7 @@ impl<R: Rounds, V: Variant> Backend<R, V> {
             }
             // write blocks to buffer
             for state_row in 0..4 {
+                #[allow(clippy::cast_sign_loss, reason = "needs triage")]
                 vst1q_u8(
                     dest_ptr.offset(state_row << 4),
                     vreinterpretq_u8_u32(blocks[block][state_row as usize]),
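The `// SAFETY:` comments added above are what `clippy::undocumented_unsafe_blocks` enforces: every `unsafe` block carries a justification the reviewer can check. A minimal sketch of the convention (mine, with a hypothetical helper):

```rust
/// Read the first four bytes of `bytes` as a little-endian u32, if present.
fn read_first_word(bytes: &[u8]) -> Option<u32> {
    if bytes.len() < 4 {
        return None;
    }
    // SAFETY: the length check above guarantees 4 readable bytes, and
    // `read_unaligned` imposes no alignment requirement.
    let word = unsafe { bytes.as_ptr().cast::<u32>().read_unaligned() };
    Some(u32::from_le(word))
}
```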
diff --git a/chacha20/src/backends/soft.rs b/chacha20/src/backends/soft.rs
index 3ce4f6b1..ed448d4c 100644
--- a/chacha20/src/backends/soft.rs
+++ b/chacha20/src/backends/soft.rs
@@ -1,5 +1,6 @@
-//! Portable implementation which does not rely on architecture-specific
-//! intrinsics.
+//! Portable implementation which does not rely on architecture-specific intrinsics.
+
+#![allow(clippy::cast_possible_truncation, reason = "needs triage")]
 
 use crate::{ChaChaCore, Rounds, STATE_WORDS, Variant, quarter_round};
 
@@ -35,7 +36,7 @@ impl<R: Rounds, V: Variant> StreamCipherBackend for Backend<'_, R, V> {
         ctr = ctr.wrapping_add(1);
         self.0.state[12] = ctr as u32;
         if size_of::<V::Counter>() == 8 {
-            self.0.state[13] = (ctr >> 32) as u32
+            self.0.state[13] = (ctr >> 32) as u32;
         }
 
         for (chunk, val) in block.chunks_exact_mut(4).zip(res.iter()) {
@@ -50,7 +51,7 @@ impl<R: Rounds, V: Variant> Backend<'_, R, V> {
     pub(crate) fn gen_ks_blocks(&mut self, buffer: &mut [u32; 64]) {
         for block in 0..4 {
             let res = run_rounds::<R>(&self.0.state);
-            let mut ctr = u64::from(self.0.state[13]) << 32 | u64::from(self.0.state[12]);
+            let mut ctr = (u64::from(self.0.state[13]) << 32) | u64::from(self.0.state[12]);
             ctr = ctr.wrapping_add(1);
             self.0.state[12] = ctr as u32;
             self.0.state[13] = (ctr >> 32) as u32;
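The added parentheses in `gen_ks_blocks` are purely for readability; `<<` already binds tighter than `|`. A sketch (mine) of the counter round-trip the soft backend performs, including the wrapping behavior:

```rust
fn bump_counter(state12: u32, state13: u32) -> (u32, u32) {
    let ctr = (u64::from(state13) << 32) | u64::from(state12);
    let next = ctr.wrapping_add(1); // wraps to zero instead of panicking
    (next as u32, (next >> 32) as u32)
}

#[test]
fn wraps_at_u64_max() {
    assert_eq!(bump_counter(u32::MAX, u32::MAX), (0, 0));
}
```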
diff --git a/chacha20/src/backends/sse2.rs b/chacha20/src/backends/sse2.rs
index e2f66dc1..f6479a1e 100644
--- a/chacha20/src/backends/sse2.rs
+++ b/chacha20/src/backends/sse2.rs
@@ -1,15 +1,22 @@
-#![allow(unsafe_op_in_unsafe_fn)]
+//! SSE2 backend.
+
+#![allow(unsafe_op_in_unsafe_fn, reason = "needs triage")]
+#![allow(clippy::cast_possible_truncation, reason = "needs triage")]
+#![allow(clippy::cast_possible_wrap, reason = "needs triage")]
+#![allow(clippy::cast_sign_loss, reason = "needs triage")]
+#![allow(clippy::undocumented_unsafe_blocks, reason = "TODO")]
+
 use crate::{Rounds, Variant};
 
 #[cfg(feature = "rng")]
-use crate::{ChaChaCore};
+use crate::ChaChaCore;
 
 #[cfg(feature = "cipher")]
-use crate::{chacha::Block, STATE_WORDS};
+use crate::{STATE_WORDS, chacha::Block};
 #[cfg(feature = "cipher")]
 use cipher::{
-    consts::{U4, U64},
     BlockSizeUser, ParBlocksSizeUser, StreamCipherBackend, StreamCipherClosure,
+    consts::{U4, U64},
 };
 
 use core::marker::PhantomData;
@@ -29,7 +36,7 @@ where
     F: StreamCipherClosure<BlockSize = U64>,
     V: Variant,
 {
-    let state_ptr = state.as_ptr() as *const __m128i;
+    let state_ptr = state.as_ptr().cast::<__m128i>();
     let mut backend = Backend::<R, V> {
         v: [
             _mm_loadu_si128(state_ptr.add(0)),
@@ -44,7 +51,7 @@ where
     state[12] = _mm_cvtsi128_si32(backend.v[3]) as u32;
 
     if size_of::<V::Counter>() == 8 {
-        state[13] = _mm_extract_epi32(backend.v[3], 1) as u32
+        state[13] = _mm_extract_epi32(backend.v[3], 1) as u32;
     }
 }
@@ -72,10 +79,10 @@ impl<R: Rounds, V: Variant> StreamCipherBackend for Backend<R, V> {
         self.v[3] = match size_of::<V::Counter>() {
             4 => _mm_add_epi32(self.v[3], _mm_set_epi32(0, 0, 0, 1)),
             8 => _mm_add_epi64(self.v[3], _mm_set_epi64x(0, 1)),
-            _ => unreachable!()
+            _ => unreachable!(),
         };
 
-        let block_ptr = block.as_mut_ptr() as *mut __m128i;
+        let block_ptr = block.as_mut_ptr().cast::<__m128i>();
         for i in 0..4 {
             _mm_storeu_si128(block_ptr.add(i), res[0][i]);
         }
@@ -88,10 +95,10 @@ impl<R: Rounds, V: Variant> StreamCipherBackend for Backend<R, V> {
         self.v[3] = match size_of::<V::Counter>() {
             4 => _mm_add_epi32(self.v[3], _mm_set_epi32(0, 0, 0, PAR_BLOCKS as i32)),
             8 => _mm_add_epi64(self.v[3], _mm_set_epi64x(0, PAR_BLOCKS as i64)),
-            _ => unreachable!()
+            _ => unreachable!(),
         };
 
-        let blocks_ptr = blocks.as_mut_ptr() as *mut __m128i;
+        let blocks_ptr = blocks.as_mut_ptr().cast::<__m128i>();
         for block in 0..PAR_BLOCKS {
             for i in 0..4 {
                 _mm_storeu_si128(blocks_ptr.add(i + block * PAR_BLOCKS), res[block][i]);
@@ -109,7 +116,7 @@ where
     R: Rounds,
     V: Variant,
 {
-    let state_ptr = core.state.as_ptr() as *const __m128i;
+    let state_ptr = core.state.as_ptr().cast::<__m128i>();
     let mut backend = Backend::<R, V> {
         v: [
             _mm_loadu_si128(state_ptr.add(0)),
@@ -135,7 +142,7 @@ impl<R: Rounds, V: Variant> Backend<R, V> {
         let res = rounds::<R, V>(&self.v);
         self.v[3] = _mm_add_epi64(self.v[3], _mm_set_epi64x(0, PAR_BLOCKS as i64));
 
-        let blocks_ptr = block.as_mut_ptr() as *mut __m128i;
+        let blocks_ptr = block.as_mut_ptr().cast::<__m128i>();
         for block in 0..PAR_BLOCKS {
             for i in 0..4 {
                 _mm_storeu_si128(blocks_ptr.add(i + block * PAR_BLOCKS), res[block][i]);
@@ -153,7 +160,7 @@ unsafe fn rounds<R: Rounds, V: Variant>(v: &[__m128i; 4]) -> [[__m128i; 4]; PAR_BLOCKS] {
         res[block][3] = match size_of::<V::Counter>() {
             4 => _mm_add_epi32(res[block][3], _mm_set_epi32(0, 0, 0, block as i32)),
             8 => _mm_add_epi64(res[block][3], _mm_set_epi64x(0, block as i64)),
-            _ => unreachable!()
+            _ => unreachable!(),
         }
     }
 
@@ -168,7 +175,7 @@ unsafe fn rounds<R: Rounds, V: Variant>(v: &[__m128i; 4]) -> [[__m128i; 4]; PAR_BLOCKS] {
         let ctr = match size_of::<V::Counter>() {
             4 => _mm_add_epi32(v[3], _mm_set_epi32(0, 0, 0, block as i32)),
             8 => _mm_add_epi64(v[3], _mm_set_epi64x(0, block as i64)),
-            _ => unreachable!()
+            _ => unreachable!(),
         };
         res[block][3] = _mm_add_epi32(res[block][3], ctr);
     }
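On the `match size_of::<V::Counter>()` arms that end in `unreachable!()`: the counter width is fixed per `Variant`, so the branch is resolved at monomorphization time and the panic arm compiles away. A sketch (mine):

```rust
use core::mem::size_of;

fn counter_width<C>() -> &'static str {
    match size_of::<C>() {
        4 => "32-bit counter (IETF)",
        8 => "64-bit counter (legacy/DJB)",
        _ => unreachable!(), // no Variant defines another counter width
    }
}

fn main() {
    assert_eq!(counter_width::<u32>(), "32-bit counter (IETF)");
    assert_eq!(counter_width::<u64>(), "64-bit counter (legacy/DJB)");
}
```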
diff --git a/chacha20/src/lib.rs b/chacha20/src/lib.rs
index d8149780..4a64a100 100644
--- a/chacha20/src/lib.rs
+++ b/chacha20/src/lib.rs
@@ -5,7 +5,6 @@
     html_logo_url = "https://raw.githubusercontent.com/RustCrypto/media/8f1a9894/logo.svg",
     html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/media/8f1a9894/logo.svg"
 )]
-#![warn(missing_docs, rust_2018_idioms, trivial_casts, unused_qualifications)]
 
 //! # Usage
 //!
@@ -93,16 +92,7 @@
 //! [Salsa]: https://en.wikipedia.org/wiki/Salsa20
 //! [`chacha20poly1305`]: https://docs.rs/chacha20poly1305
 
-#[cfg(feature = "cipher")]
-pub use cipher;
-#[cfg(feature = "cipher")]
-use cipher::{BlockSizeUser, StreamCipherCore, StreamCipherSeekCore, consts::U64};
-
-use cfg_if::cfg_if;
-use core::marker::PhantomData;
-
-#[cfg(feature = "zeroize")]
-use zeroize::{Zeroize, ZeroizeOnDrop};
+pub mod variants;
 
 mod backends;
 #[cfg(feature = "cipher")]
@@ -114,21 +104,28 @@ mod rng;
 #[cfg(feature = "xchacha")]
 mod xchacha;
 
-pub mod variants;
-use variants::Variant;
-
 #[cfg(feature = "cipher")]
 pub use chacha::{ChaCha8, ChaCha12, ChaCha20, Key, KeyIvInit};
+#[cfg(feature = "cipher")]
+pub use cipher;
+#[cfg(feature = "legacy")]
+pub use legacy::{ChaCha20Legacy, LegacyNonce};
 #[cfg(feature = "rng")]
 pub use rand_core;
 #[cfg(feature = "rng")]
 pub use rng::{ChaCha8Rng, ChaCha12Rng, ChaCha20Rng};
-
-#[cfg(feature = "legacy")]
-pub use legacy::{ChaCha20Legacy, LegacyNonce};
 #[cfg(feature = "xchacha")]
 pub use xchacha::{XChaCha8, XChaCha12, XChaCha20, XNonce, hchacha};
 
+use cfg_if::cfg_if;
+use core::{fmt, marker::PhantomData};
+use variants::Variant;
+
+#[cfg(feature = "cipher")]
+use cipher::{BlockSizeUser, StreamCipherCore, StreamCipherSeekCore, consts::U64};
+#[cfg(feature = "zeroize")]
+use zeroize::{Zeroize, ZeroizeOnDrop};
+
 /// State initialization constant ("expand 32-byte k")
 #[cfg(any(feature = "cipher", feature = "rng"))]
 const CONSTANTS: [u32; 4] = [0x6170_7865, 0x3320_646e, 0x7962_2d32, 0x6b20_6574];
@@ -143,7 +140,7 @@ pub trait Rounds: Copy {
 }
 
 /// 8-rounds
-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, Debug)]
 pub struct R8;
 
 impl Rounds for R8 {
@@ -151,7 +148,7 @@ impl Rounds for R8 {
 }
 
 /// 12-rounds
-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, Debug)]
 pub struct R12;
 
 impl Rounds for R12 {
@@ -159,7 +156,7 @@ impl Rounds for R12 {
 }
 
 /// 20-rounds
-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, Debug)]
 pub struct R20;
 
 impl Rounds for R20 {
@@ -229,13 +226,17 @@ impl<R: Rounds, V: Variant> ChaChaCore<R, V> {
 
         const_dst.copy_from_slice(&CONSTANTS);
 
-        for (src, dst) in key.chunks_exact(4).zip(key_dst) {
-            *dst = u32::from_le_bytes(src.try_into().unwrap());
-        }
+        // TODO(tarcieri): when MSRV 1.88, use `[T]::as_chunks` to avoid panic
+        #[allow(clippy::unwrap_used, reason = "MSRV TODO")]
+        {
+            for (src, dst) in key.chunks_exact(4).zip(key_dst) {
+                *dst = u32::from_le_bytes(src.try_into().unwrap());
+            }
 
-        assert_eq!(size_of_val(iv_dst), size_of_val(iv));
-        for (src, dst) in iv.chunks_exact(4).zip(iv_dst) {
-            *dst = u32::from_le_bytes(src.try_into().unwrap());
+            assert_eq!(size_of_val(iv_dst), size_of_val(iv));
+            for (src, dst) in iv.chunks_exact(4).zip(iv_dst) {
+                *dst = u32::from_le_bytes(src.try_into().unwrap());
+            }
         }
 
         cfg_if! {
@@ -269,6 +270,7 @@ impl<R: Rounds, V: Variant> ChaChaCore<R, V> {
     /// Get the current block position.
     #[cfg(any(feature = "cipher", feature = "rng"))]
     #[inline(always)]
+    #[must_use]
     pub fn get_block_pos(&self) -> V::Counter {
         V::get_block_pos(&self.state[12..])
     }
@@ -281,6 +283,17 @@ impl<R: Rounds, V: Variant> ChaChaCore<R, V> {
     }
 }
 
+impl<R: Rounds, V: Variant> fmt::Debug for ChaChaCore<R, V> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(
+            f,
+            "ChaChaCore<R{}, C{}> {{ ... }}",
+            R::COUNT,
+            size_of::<V::Counter>() * 8
+        )
+    }
+}
+
 #[cfg(feature = "cipher")]
 impl<R: Rounds, V: Variant> StreamCipherSeekCore for ChaChaCore<R, V> {
     type Counter = V::Counter;
@@ -292,7 +305,7 @@ impl<R: Rounds, V: Variant> StreamCipherSeekCore for ChaChaCore<R, V> {
 
     #[inline(always)]
     fn set_block_pos(&mut self, pos: Self::Counter) {
-        self.set_block_pos(pos)
+        self.set_block_pos(pos);
     }
 }
 
@@ -332,16 +345,19 @@ impl<R: Rounds, V: Variant> StreamCipherCore for ChaChaCore<R, V> {
 
                 #[cfg(chacha20_avx512)]
                 if avx512_token.get() {
+                    // SAFETY: runtime CPU feature detection above ensures this is valid
                     unsafe {
                         backends::avx512::inner::<R, _, V>(&mut self.state, f);
                     }
                     return;
                 }
 
                 if avx2_token.get() {
+                    // SAFETY: runtime CPU feature detection above ensures this is valid
                     unsafe {
                         backends::avx2::inner::<R, _, V>(&mut self.state, f);
                     }
                 } else if sse2_token.get() {
+                    // SAFETY: runtime CPU feature detection above ensures this is valid
                     unsafe {
                         backends::sse2::inner::<R, _, V>(&mut self.state, f);
                     }
@@ -351,6 +367,7 @@ impl<R: Rounds, V: Variant> StreamCipherCore for ChaChaCore<R, V> {
                 }
             }
         } else if #[cfg(all(target_arch = "aarch64", target_feature = "neon"))] {
+            // SAFETY: we have used conditional compilation to ensure NEON is available
             unsafe {
                 backends::neon::inner::<R, _, V>(&mut self.state, f);
             }
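The manual `Debug` impl for `ChaChaCore` (and for `Seed` below) follows the redaction pattern: satisfy `missing_debug_implementations` without printing key material. A sketch (mine, hypothetical type and fields):

```rust
use core::fmt;

struct CipherState {
    #[allow(dead_code)] // deliberately never printed
    key: [u32; 8],
    rounds: usize,
}

impl fmt::Debug for CipherState {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Renders as `CipherState { rounds: 20, .. }`; the key stays out of logs.
        f.debug_struct("CipherState")
            .field("rounds", &self.rounds)
            .finish_non_exhaustive()
    }
}
```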
diff --git a/chacha20/src/rng.rs b/chacha20/src/rng.rs
index 19cae5d8..a600bc8b 100644
--- a/chacha20/src/rng.rs
+++ b/chacha20/src/rng.rs
@@ -6,7 +6,13 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use core::{convert::Infallible, fmt::Debug};
+#![allow(clippy::cast_possible_truncation, reason = "needs triage")]
+#![allow(clippy::undocumented_unsafe_blocks, reason = "TODO")]
+
+use core::{
+    convert::Infallible,
+    fmt::{self, Debug},
+};
 
 use rand_core::{
     SeedableRng, TryCryptoRng, TryRng,
@@ -28,7 +34,8 @@ pub(crate) const BLOCK_WORDS: u8 = 16;
 
 /// The seed for ChaCha20. Implements ZeroizeOnDrop when the
 /// zeroize feature is enabled.
-#[derive(PartialEq, Eq, Default, Clone)]
+#[derive(Clone, Default, Eq, PartialEq)]
+#[allow(missing_copy_implementations)]
 pub struct Seed([u8; 32]);
 
 impl AsRef<[u8; 32]> for Seed {
@@ -49,6 +56,12 @@ impl AsMut<[u8]> for Seed {
     }
 }
 
+impl Debug for Seed {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_tuple("Seed").finish_non_exhaustive()
+    }
+}
+
 impl From<[u8; 32]> for Seed {
     #[cfg(feature = "zeroize")]
     fn from(mut value: [u8; 32]) -> Self {
@@ -71,12 +84,6 @@ impl Drop for Seed {
 #[cfg(feature = "zeroize")]
 impl ZeroizeOnDrop for Seed {}
 
-impl Debug for Seed {
-    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        self.0.fmt(f)
-    }
-}
-
 /// A wrapper for `stream_id` (64-bits).
 ///
 /// Can be constructed from any of the following:
@@ -85,6 +92,7 @@ impl ZeroizeOnDrop for Seed {}
 /// * `u64`
 /// * `[u32; 2]`
 /// * `[u8; 8]`
 ///
 /// The arrays should be in little endian order.
+#[derive(Clone, Copy, Debug)]
 pub struct StreamId([u32; Self::LEN]);
 
 impl StreamId {
@@ -106,6 +114,9 @@ impl From<[u8; Self::BYTES]> for StreamId {
     #[inline]
     fn from(value: [u8; Self::BYTES]) -> Self {
         let mut result = Self(Default::default());
+
+        // TODO(tarcieri): when MSRV 1.88, use `[T]::as_chunks` to avoid panic
+        #[allow(clippy::unwrap_used, reason = "MSRV TODO")]
         for (cur, chunk) in result
             .0
             .iter_mut()
@@ -120,8 +131,7 @@ impl From<[u8; Self::BYTES]> for StreamId {
 impl From<u64> for StreamId {
     #[inline]
     fn from(value: u64) -> Self {
-        let result: [u8; Self::BYTES] = value.to_le_bytes()[..Self::BYTES].try_into().unwrap();
-        result.into()
+        Self([(value & 0xFFFF_FFFF) as u32, (value >> 32) as u32])
     }
 }
@@ -168,6 +178,7 @@ impl<R: Rounds> ChaChaCore<R, Legacy> {
             }
         } else if #[cfg(all(target_arch = "aarch64", target_feature = "neon"))] {
+            // SAFETY: we have used conditional compilation to ensure NEON is available
             unsafe {
                 backends::neon::rng_inner::<R, Legacy>(self, buffer);
             }
@@ -313,14 +324,15 @@ macro_rules! impl_chacha_rng {
         /// not supported, hence the result can simply be multiplied by 4 to get a
         /// byte-offset.
         #[inline]
+        #[must_use]
         pub fn get_word_pos(&self) -> u128 {
            let mut block_counter = (u64::from(self.core.core.state[13]) << 32)
                | u64::from(self.core.core.state[12]);
             if self.core.word_offset() != 0 {
-                block_counter = block_counter.wrapping_sub(BUF_BLOCKS as u64);
+                block_counter = block_counter.wrapping_sub(u64::from(BUF_BLOCKS));
             }
-            let word_pos =
-                block_counter as u128 * BLOCK_WORDS as u128 + self.core.word_offset() as u128;
+            let word_pos = u128::from(block_counter) * u128::from(BLOCK_WORDS)
+                + self.core.word_offset() as u128;
             // eliminate bits above the 68th bit
             word_pos & ((1 << 68) - 1)
         }
@@ -335,8 +347,8 @@ macro_rules! impl_chacha_rng {
         /// 60 bits.
         #[inline]
         pub fn set_word_pos(&mut self, word_offset: u128) {
-            let index = (word_offset % BLOCK_WORDS as u128) as usize;
-            let counter = word_offset / BLOCK_WORDS as u128;
+            let index = (word_offset % u128::from(BLOCK_WORDS)) as usize;
+            let counter = word_offset / u128::from(BLOCK_WORDS);
             //self.set_block_pos(counter as u64);
             self.core.core.state[12] = counter as u32;
             self.core.core.state[13] = (counter >> 32) as u32;
@@ -359,10 +371,11 @@ macro_rules! impl_chacha_rng {
 
         /// Get the block pos.
         #[inline]
         #[allow(unused)]
+        #[must_use]
         pub fn get_block_pos(&self) -> u64 {
             let counter = self.core.core.get_block_pos();
             if self.core.word_offset() != 0 {
-                counter - BUF_BLOCKS as u64 + self.core.word_offset() as u64 / 16
+                counter - u64::from(BUF_BLOCKS) + self.core.word_offset() as u64 / 16
             } else {
                 counter
             }
@@ -414,6 +427,7 @@ macro_rules! impl_chacha_rng {
 
         /// Get the stream number.
         #[inline]
+        #[must_use]
         pub fn get_stream(&self) -> u64 {
             let mut result = [0u8; 8];
             for (i, &big) in self.core.core.state[14..BLOCK_WORDS as usize]
@@ -431,6 +445,7 @@ macro_rules! impl_chacha_rng {
 
         /// Get the seed.
         #[inline]
+        #[must_use]
         pub fn get_seed(&self) -> [u8; 32] {
             let mut result = [0u8; 32];
             for (i, &big) in self.core.core.state[4..12].iter().enumerate() {
@@ -665,9 +680,7 @@ pub(crate) mod tests {
 
         if first_blocks[0..64 * 4].ne(&result[64..]) {
             for (i, (a, b)) in first_blocks.iter().zip(result.iter().skip(64)).enumerate() {
-                if a.ne(b) {
-                    panic!("i = {}\na = {}\nb = {}", i, a, b);
-                }
+                assert!(!a.ne(b), "i = {}\na = {}\nb = {}", i, a, b);
             }
         }
         assert_eq!(&first_blocks[0..64 * 4], &result[64..]);
@@ -685,13 +698,13 @@ pub(crate) mod tests {
         let first_blocks_end_word_pos = rng.get_word_pos();
 
         // get first four blocks after the supposed overflow
-        rng.set_block_pos(u32::MAX as u64);
+        rng.set_block_pos(u64::from(u32::MAX));
         let mut result = [0u8; 64 * 5];
         rng.fill_bytes(&mut result);
         assert_ne!(first_blocks_end_word_pos, rng.get_word_pos());
         assert_eq!(
             rng.get_word_pos(),
-            first_blocks_end_word_pos + (1 << 32) * BLOCK_WORDS as u128
+            first_blocks_end_word_pos + (1 << 32) * u128::from(BLOCK_WORDS)
         );
         assert_ne!(&first_blocks[0..64 * 4], &result[64..]);
     }
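On the word-position arithmetic being converted to `u128::from` above: a block is 16 u32 words, so a 64-bit block counter times 16 plus a word offset needs 68 bits, which is why the result is masked. A sketch (mine) of the round-trip:

```rust
const BLOCK_WORDS: u128 = 16;

fn word_pos(block_counter: u64, word_offset: u128) -> u128 {
    (u128::from(block_counter) * BLOCK_WORDS + word_offset) & ((1 << 68) - 1)
}

fn split_word_pos(pos: u128) -> (u64, usize) {
    ((pos / BLOCK_WORDS) as u64, (pos % BLOCK_WORDS) as usize)
}

#[test]
fn roundtrip() {
    let pos = word_pos(u64::MAX, 3);
    assert_eq!(split_word_pos(pos), (u64::MAX, 3));
}
```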
diff --git a/chacha20/src/variants.rs b/chacha20/src/variants.rs
index b843e822..676fc24c 100644
--- a/chacha20/src/variants.rs
+++ b/chacha20/src/variants.rs
@@ -9,7 +9,7 @@ mod sealed {
 pub trait Variant: sealed::Sealed {
     /// The counter's type.
     #[cfg(not(feature = "cipher"))]
-    type Counter;
+    type Counter: Copy;
 
     /// The counter's type.
     #[cfg(feature = "cipher")]
@@ -28,6 +28,7 @@ pub trait Variant: sealed::Sealed {
 }
 
 /// IETF ChaCha configuration to use a 32-bit counter and 96-bit nonce.
+#[derive(Clone, Copy, Debug)]
 pub enum Ietf {}
 
 impl sealed::Sealed for Ietf {}
@@ -54,6 +55,7 @@ impl Variant for Ietf {
 
 /// DJB variant specific features: 64-bit counter and 64-bit nonce.
 #[cfg(any(feature = "legacy", feature = "rng"))]
+#[derive(Clone, Copy, Debug)]
 pub enum Legacy {}
 
 #[cfg(any(feature = "legacy", feature = "rng"))]
@@ -70,8 +72,8 @@ impl Variant for Legacy {
 
     #[inline(always)]
     fn set_block_pos(row: &mut [u32], pos: u64) {
-        row[0] = (pos & 0xFFFF_FFFF).try_into().unwrap();
-        row[1] = (pos >> 32).try_into().unwrap();
+        row[0] = (pos & 0xFFFF_FFFF) as u32;
+        row[1] = (pos >> 32) as u32;
     }
 
     #[inline(always)]
diff --git a/chacha20/src/xchacha.rs b/chacha20/src/xchacha.rs
index e50c0677..73cb9f57 100644
--- a/chacha20/src/xchacha.rs
+++ b/chacha20/src/xchacha.rs
@@ -1,14 +1,13 @@
 //! XChaCha is an extended nonce variant of ChaCha
 
+use crate::{
+    CONSTANTS, ChaChaCore, R8, R12, R20, Rounds, STATE_WORDS, quarter_round, variants::Ietf,
+};
 use cipher::{
     BlockSizeUser, IvSizeUser, KeyIvInit, KeySizeUser, StreamCipherClosure, StreamCipherCore,
     StreamCipherCoreWrapper, StreamCipherSeekCore,
     array::Array,
-    consts::{U16, U24, U32, U64},
-};
-
-use crate::{
-    CONSTANTS, ChaChaCore, R8, R12, R20, Rounds, STATE_WORDS, quarter_round, variants::Ietf,
+    consts::{U4, U16, U24, U32, U64},
 };
 
 #[cfg(feature = "zeroize")]
@@ -42,6 +41,7 @@ pub type XChaCha12 = StreamCipherCoreWrapper<XChaChaCore<R12>>;
 pub type XChaCha8 = StreamCipherCoreWrapper<XChaChaCore<R8>>;
 
 /// The XChaCha core function.
+#[derive(Debug)]
 pub struct XChaChaCore<R: Rounds>(ChaChaCore<R, Ietf>);
 
 impl<R: Rounds> KeySizeUser for XChaChaCore<R> {
@@ -58,6 +58,7 @@ impl<R: Rounds> BlockSizeUser for XChaChaCore<R> {
 
 impl<R: Rounds> KeyIvInit for XChaChaCore<R> {
     fn new(key: &Key, iv: &XNonce) -> Self {
+        #[allow(clippy::unwrap_used)]
         let subkey = hchacha::<R>(key, iv[..16].as_ref().try_into().unwrap());
 
         let mut nonce = [0u8; 12];
@@ -111,17 +112,19 @@ impl<R: Rounds> ZeroizeOnDrop for XChaChaCore<R> {}
 /// For more information on HSalsa on which HChaCha is based, see:
 ///
 /// <https://cr.yp.to/snuffle/xsalsa-20110204.pdf>
+#[must_use]
 pub fn hchacha<R: Rounds>(key: &Key, input: &Array<u8, U16>) -> Array<u8, U32> {
     let mut state = [0u32; STATE_WORDS];
     state[..4].copy_from_slice(&CONSTANTS);
 
-    let key_chunks = key.chunks_exact(4);
+    // TODO(tarcieri): use `[T]::as_chunks` when MSRV 1.88
+    let key_chunks = Array::<u8, U4>::slice_as_chunks(key).0;
     for (v, chunk) in state[4..12].iter_mut().zip(key_chunks) {
-        *v = u32::from_le_bytes(chunk.try_into().unwrap());
+        *v = u32::from_le_bytes(chunk.0);
     }
-    let input_chunks = input.chunks_exact(4);
+    let input_chunks = Array::<u8, U4>::slice_as_chunks(input).0;
     for (v, chunk) in state[12..16].iter_mut().zip(input_chunks) {
-        *v = u32::from_le_bytes(chunk.try_into().unwrap());
+        *v = u32::from_le_bytes(chunk.0);
     }
 
     // R rounds consisting of R/2 column rounds and R/2 diagonal rounds
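The `MSRV 1.88` TODOs sprinkled through the diff refer to `[T]::as_chunks`, which yields `&[[u8; 4]]` directly and removes the fallible per-chunk conversion. A sketch (mine) of what those sites would look like once the MSRV allows it:

```rust
/// Convert 32 little-endian bytes into eight u32 words (requires Rust 1.88+).
fn words_from_bytes(bytes: &[u8; 32]) -> [u32; 8] {
    let mut words = [0u32; 8];
    let (chunks, rest) = bytes.as_chunks::<4>();
    debug_assert!(rest.is_empty()); // 32 is a multiple of 4
    for (dst, chunk) in words.iter_mut().zip(chunks) {
        *dst = u32::from_le_bytes(*chunk); // no `try_into().unwrap()` panic path
    }
    words
}
```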
diff --git a/chacha20/tests/kats.rs b/chacha20/tests/kats.rs
index 4e4aa33c..1035af28 100644
--- a/chacha20/tests/kats.rs
+++ b/chacha20/tests/kats.rs
@@ -227,7 +227,7 @@ mod legacy {
             cipher.apply_keystream(&mut buf[middle..last]);
 
             for k in idx..last {
-                assert_eq!(buf[k], EXPECTED_LONG[k])
+                assert_eq!(buf[k], EXPECTED_LONG[k]);
             }
         }
     }
diff --git a/chacha20/tests/rng.rs b/chacha20/tests/rng.rs
index 81e031d1..478e3145 100644
--- a/chacha20/tests/rng.rs
+++ b/chacha20/tests/rng.rs
@@ -1,3 +1,5 @@
+//! Random number generator tests.
+
 #![cfg(feature = "rng")]
 
 use chacha20::{
@@ -308,7 +310,7 @@ fn count_incorrect_bytes(expected: &[u8], output: &[u8]) -> (Option<usize>, u32) {
         .for_each(|((i, a), b)| {
             if a.ne(b) {
                 if index_of_first_incorrect_word.is_none() {
-                    index_of_first_incorrect_word = Some(i / 4)
+                    index_of_first_incorrect_word = Some(i / 4);
                 }
                 num_incorrect_bytes += 1;
             }
@@ -321,7 +323,7 @@ fn counter_overflow_and_diagnostics() {
     let mut rng = ChaCha20Rng::from_seed([0u8; 32]);
     let block_pos = 4294967295;
-    assert_eq!(block_pos, u32::MAX as u64);
+    assert_eq!(block_pos, u64::from(u32::MAX));
     rng.set_block_pos(4294967295);
 
     let mut output = [0u8; 64 * 4];
@@ -343,7 +345,7 @@ fn counter_overflow_and_diagnostics() {
         "The first parblock was incorrect before overflow, indicating that ChaCha was not implemented correctly for this backend. Check the rounds() fn or the functions that it calls"
     );
 
-    rng.set_block_pos(u32::MAX as u64 - 1);
+    rng.set_block_pos(u64::from(u32::MAX) - 1);
     let mut skipped_blocks = [0u8; 64 * 3];
     rng.fill_bytes(&mut skipped_blocks);
     rng.fill_bytes(&mut output[64 * 3..]);
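The `manual_assert` rewrite in the rng tests above replaces an `if`/`panic!` pair with a single `assert!`. An equivalent form (sketch, mine) with the condition stated positively:

```rust
fn check_bytes(expected: &[u8], actual: &[u8]) {
    for (i, (a, b)) in expected.iter().zip(actual).enumerate() {
        // before: if a.ne(b) { panic!("mismatch at byte {i}") }
        assert!(a.eq(b), "mismatch at byte {i}: expected {a}, got {b}");
    }
}
```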