From 00de91fb64e8fb7bf97ec23dbb0d7c9d6f853167 Mon Sep 17 00:00:00 2001
From: Brian Smith
Date: Fri, 27 Sep 2024 14:01:33 -0700
Subject: [PATCH] `use core::mem::{size_of,align_of}` instead of qualifying.

Address a new complaint from the newest Clippy. Remove the relevant
outdated/misleading advice in STYLE.md.
---
 STYLE.md                       | 9 ---------
 src/aead/chacha/fallback.rs    | 2 +-
 src/arithmetic/constant.rs     | 3 ++-
 src/bssl.rs                    | 9 +++------
 src/cpu.rs                     | 3 ++-
 src/cpu/arm.rs                 | 7 ++++---
 src/cpu/intel.rs               | 7 ++++---
 src/digest/dynstate.rs         | 5 +++--
 src/polyfill/array_flat_map.rs | 4 ++--
 src/polyfill/cstr.rs           | 6 ++----
 src/polyfill/notsend.rs        | 4 ++--
 src/polyfill/slice.rs          | 6 ++++--
 12 files changed, 29 insertions(+), 36 deletions(-)

diff --git a/STYLE.md b/STYLE.md
index 882b7f5d8b..54e2ce1917 100644
--- a/STYLE.md
+++ b/STYLE.md
@@ -7,15 +7,6 @@ style guidelines for that code are in the second section of this document.
 *ring* usually follows the [Rust Guidelines](https://aturon.github.io/), but
 there are some differences and *ring* adds additional guidelines.
 
-## Imports (`use`) and Qualification
-
-In general, import modules, not non-module items, e.g. `use core`, not
-`use core::mem::size_of_val`. This means that the uses of such functions must
-be qualified: `core::mem::size_of_val(x)`, not `size_of_val(x)`. Exceptions may
-be made for things that are very annoying to qualify; for example, we usually
-`use super::input::*` or `use super::input::Input` because writing things like
-`input::Input` is highly annoying.
-
 ## Submodules and file naming.
 
 In general, avoid nesting modules and avoid exporting any non-module items from
diff --git a/src/aead/chacha/fallback.rs b/src/aead/chacha/fallback.rs
index a90959a8ac..f706a6ffe2 100644
--- a/src/aead/chacha/fallback.rs
+++ b/src/aead/chacha/fallback.rs
@@ -82,7 +82,7 @@ fn chacha_core(output: &mut [u8; BLOCK_LEN], input: &State) {
     }
 
     output
-        .chunks_exact_mut(core::mem::size_of::<u32>())
+        .chunks_exact_mut(size_of::<u32>())
         .zip(x.iter())
        .for_each(|(output, &x)| output.copy_from_slice(&x.to_le_bytes()));
 }
diff --git a/src/arithmetic/constant.rs b/src/arithmetic/constant.rs
index a3f49e6a52..1419bd7e07 100644
--- a/src/arithmetic/constant.rs
+++ b/src/arithmetic/constant.rs
@@ -1,4 +1,5 @@
 use crate::limb::Limb;
+use core::mem::size_of;
 
 const fn parse_digit(d: u8) -> u8 {
     match d.to_ascii_lowercase() {
@@ -12,7 +13,7 @@ const fn parse_digit(d: u8) -> u8 {
 pub const fn limbs_from_hex<const LIMBS: usize>(hex: &str) -> [Limb; LIMBS] {
     let hex = hex.as_bytes();
     let mut limbs = [0; LIMBS];
-    let limb_nibbles = core::mem::size_of::<Limb>() * 2;
+    let limb_nibbles = size_of::<Limb>() * 2;
     let mut i = 0;
 
     while i < hex.len() {
diff --git a/src/bssl.rs b/src/bssl.rs
index f1d0b34bec..9d958b3b6c 100644
--- a/src/bssl.rs
+++ b/src/bssl.rs
@@ -37,16 +37,13 @@ impl From<Result> for core::result::Result<(), error::Unspecified> {
 mod tests {
     mod result {
         use crate::{bssl, c};
-        use core::mem;
+        use core::mem::{align_of, size_of};
 
         #[test]
         fn size_and_alignment() {
             type Underlying = c::int;
-            assert_eq!(mem::size_of::<bssl::Result>(), mem::size_of::<Underlying>());
-            assert_eq!(
-                mem::align_of::<bssl::Result>(),
-                mem::align_of::<Underlying>()
-            );
+            assert_eq!(size_of::<bssl::Result>(), size_of::<Underlying>());
+            assert_eq!(align_of::<bssl::Result>(), align_of::<Underlying>());
         }
 
         #[test]
diff --git a/src/cpu.rs b/src/cpu.rs
index 89ec676dda..03821791ee 100644
--- a/src/cpu.rs
+++ b/src/cpu.rs
@@ -13,6 +13,7 @@
 // CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 
 pub(crate) use self::features::Features;
+use core::mem::size_of;
 
 macro_rules! impl_get_feature {
     { $feature:path => $T:ident } => {
@@ -96,7 +97,7 @@ mod features {
     }
 }
 
-const _: () = assert!(core::mem::size_of::<Features>() == 0);
+const _: () = assert!(size_of::<Features>() == 0);
 
 cfg_if::cfg_if! {
     if #[cfg(any(target_arch = "aarch64", target_arch = "arm"))] {
diff --git a/src/cpu/arm.rs b/src/cpu/arm.rs
index cd387f5d62..980c0a32f1 100644
--- a/src/cpu/arm.rs
+++ b/src/cpu/arm.rs
@@ -13,6 +13,8 @@
 // CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 
 mod abi_assumptions {
+    use core::mem::size_of;
+
     // TODO: Support ARM64_32; see
     // https://github.com/briansmith/ring/issues/1832#issuecomment-1892928147. This also requires
     // replacing all `cfg(target_pointer_width)` logic for non-pointer/reference things
@@ -21,9 +23,8 @@ mod abi_assumptions {
     const _ASSUMED_POINTER_SIZE: usize = 8;
     #[cfg(target_arch = "arm")]
     const _ASSUMED_POINTER_SIZE: usize = 4;
-    const _ASSUMED_USIZE_SIZE: () = assert!(core::mem::size_of::<usize>() == _ASSUMED_POINTER_SIZE);
-    const _ASSUMED_REF_SIZE: () =
-        assert!(core::mem::size_of::<&'static u8>() == _ASSUMED_POINTER_SIZE);
+    const _ASSUMED_USIZE_SIZE: () = assert!(size_of::<usize>() == _ASSUMED_POINTER_SIZE);
+    const _ASSUMED_REF_SIZE: () = assert!(size_of::<&'static u8>() == _ASSUMED_POINTER_SIZE);
 
     // To support big-endian, we'd need to make several changes as described in
     // https://github.com/briansmith/ring/issues/1832.
diff --git a/src/cpu/intel.rs b/src/cpu/intel.rs
index 172fe47bef..4b5be35543 100644
--- a/src/cpu/intel.rs
+++ b/src/cpu/intel.rs
@@ -15,6 +15,8 @@
 use cfg_if::cfg_if;
 
 mod abi_assumptions {
+    use core::mem::size_of;
+
     // TOOD: Support targets that do not have SSE and SSE2 enabled, such as
     // x86_64-unknown-linux-none. See
     // https://github.com/briansmith/ring/issues/1793#issuecomment-1793243725,
@@ -27,9 +29,8 @@ mod abi_assumptions {
     const _ASSUMED_POINTER_SIZE: usize = 8;
     #[cfg(target_arch = "x86")]
     const _ASSUMED_POINTER_SIZE: usize = 4;
-    const _ASSUMED_USIZE_SIZE: () = assert!(core::mem::size_of::<usize>() == _ASSUMED_POINTER_SIZE);
-    const _ASSUMED_REF_SIZE: () =
-        assert!(core::mem::size_of::<&'static u8>() == _ASSUMED_POINTER_SIZE);
+    const _ASSUMED_USIZE_SIZE: () = assert!(size_of::<usize>() == _ASSUMED_POINTER_SIZE);
+    const _ASSUMED_REF_SIZE: () = assert!(size_of::<&'static u8>() == _ASSUMED_POINTER_SIZE);
 
     const _ASSUMED_ENDIANNESS: () = assert!(cfg!(target_endian = "little"));
 }
diff --git a/src/digest/dynstate.rs b/src/digest/dynstate.rs
index 7a2b342d8c..a121a0a8bd 100644
--- a/src/digest/dynstate.rs
+++ b/src/digest/dynstate.rs
@@ -14,6 +14,7 @@
 
 use super::{format_output, sha1, sha2, Output};
 use crate::{cpu, polyfill::slice};
+use core::mem::size_of;
 
 // Invariant: When constructed with `new32` (resp. `new64`), `As32` (resp.
 // `As64`) is the active variant.
@@ -92,7 +93,7 @@ pub(super) fn sha256_format_output(state: DynState) -> Output {
             unreachable!();
         }
     };
-    format_output::<_, _, { core::mem::size_of::<u32>() }>(state, u32::to_be_bytes)
+    format_output::<_, _, { size_of::<u32>() }>(state, u32::to_be_bytes)
 }
 
 pub(super) fn sha512_format_output(state: DynState) -> Output {
@@ -102,5 +103,5 @@ pub(super) fn sha512_format_output(state: DynState) -> Output {
             unreachable!();
         }
     };
-    format_output::<_, _, { core::mem::size_of::<u64>() }>(state, u64::to_be_bytes)
+    format_output::<_, _, { size_of::<u64>() }>(state, u64::to_be_bytes)
 }
diff --git a/src/polyfill/array_flat_map.rs b/src/polyfill/array_flat_map.rs
index 8e68b7feff..d4a8e95e81 100644
--- a/src/polyfill/array_flat_map.rs
+++ b/src/polyfill/array_flat_map.rs
@@ -109,7 +109,7 @@ mod tests {
         }
         impl ExactSizeIterator for DownwardCounter {}
 
-        const MAX: usize = usize::MAX / core::mem::size_of::<usize>();
+        const MAX: usize = usize::MAX / size_of::<usize>();
 
         static TEST_CASES: &[(usize, bool)] = &[(MAX, true), (MAX + 1, false)];
         TEST_CASES.iter().copied().for_each(|(input_len, is_some)| {
@@ -119,7 +119,7 @@
             let mapped = ArrayFlatMap::new(inner, usize::to_be_bytes);
             assert_eq!(mapped.is_some(), is_some);
             if let Some(mapped) = mapped {
-                assert_eq!(mapped.len(), input_len * core::mem::size_of::<usize>());
+                assert_eq!(mapped.len(), input_len * size_of::<usize>());
             }
         });
     }
diff --git a/src/polyfill/cstr.rs b/src/polyfill/cstr.rs
index 2cbf2dd2e4..54dced4c0b 100644
--- a/src/polyfill/cstr.rs
+++ b/src/polyfill/cstr.rs
@@ -37,10 +37,8 @@ pub struct Ref(&'static [u8]);
 impl Ref {
     #[inline(always)]
     pub fn as_ptr(&self) -> *const c_char {
-        const _SAME_ALIGNMENT: () =
-            assert!(core::mem::align_of::<u8>() == core::mem::align_of::<c_char>());
-        const _SAME_SIZE: () =
-            assert!(core::mem::size_of::<u8>() == core::mem::size_of::<c_char>());
+        const _SAME_ALIGNMENT: () = assert!(align_of::<u8>() == align_of::<c_char>());
+        const _SAME_SIZE: () = assert!(size_of::<u8>() == size_of::<c_char>());
 
         // It is safe to cast a `*const u8` to a `const c_char` as they are the
         // same size and alignment.
diff --git a/src/polyfill/notsend.rs b/src/polyfill/notsend.rs
index 1200c99c8b..fbffeb2786 100644
--- a/src/polyfill/notsend.rs
+++ b/src/polyfill/notsend.rs
@@ -13,7 +13,7 @@
 // CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 
 use crate::test;
-use core::marker::PhantomData;
+use core::{marker::PhantomData, mem::size_of};
 
 /// A ZST that can be added to any type to make the type `!Send`.
 #[derive(Clone, Copy)]
@@ -25,4 +25,4 @@ impl NotSend {
 
 const _: () = test::compile_time_assert_clone::<NotSend>();
 const _: () = test::compile_time_assert_copy::<NotSend>();
-const _: () = assert!(core::mem::size_of::<NotSend>() == 0);
+const _: () = assert!(size_of::<NotSend>() == 0);
diff --git a/src/polyfill/slice.rs b/src/polyfill/slice.rs
index 0a92e2596d..0e9d2e294d 100644
--- a/src/polyfill/slice.rs
+++ b/src/polyfill/slice.rs
@@ -22,6 +22,8 @@
 // IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 // DEALINGS IN THE SOFTWARE.
 
+use core::mem::size_of;
+
 // TODO(MSRV feature(slice_as_chunks)): Use `slice::as_chunks` instead.
 // This is copied from the libcore implementation of `slice::as_chunks`.
 #[inline(always)]
@@ -56,7 +58,7 @@ pub fn as_chunks_mut<T, const N: usize>(slice: &mut [T]) -> (&mut [[T; N]], &mut
 // TODO(MSRV feature(slice_flatten)): Use `slice::flatten` instead.
 // This is derived from the libcore implementation, using only stable APIs.
 pub fn flatten<T, const N: usize>(slice: &[[T; N]]) -> &[T] {
-    let len = if core::mem::size_of::<T>() == 0 {
+    let len = if size_of::<T>() == 0 {
         slice.len().checked_mul(N).expect("slice len overflow")
     } else {
         // SAFETY: `slice.len() * N` cannot overflow because `slice` is
@@ -70,7 +72,7 @@ pub fn flatten<T, const N: usize>(slice: &[[T; N]]) -> &[T] {
 // TODO(MSRV feature(slice_flatten)): Use `slice::flatten_mut` instead.
 // This is derived from the libcore implementation, using only stable APIs.
 pub fn flatten_mut<T, const N: usize>(slice: &mut [[T; N]]) -> &mut [T] {
-    let len = if core::mem::size_of::<T>() == 0 {
+    let len = if size_of::<T>() == 0 {
         slice.len().checked_mul(N).expect("slice len overflow")
     } else {
         // SAFETY: `slice.len() * N` cannot overflow because `slice` is
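
For reference, the pattern this patch applies, shown in isolation. This is a minimal sketch, not code from *ring*; the `old_style`/`new_style` functions are illustrative names, and the prelude remark assumes Rust 1.80+, where `size_of` and `align_of` were added to the prelude.

// Old style, per the STYLE.md guidance removed above: qualify the call site.
fn old_style() -> usize {
    core::mem::size_of::<u32>()
}

// New style: import the items (or rely on the Rust 1.80+ prelude) and call
// them unqualified, which is what the diff does throughout the crate.
use core::mem::{align_of, size_of};

fn new_style() -> (usize, usize) {
    (size_of::<u32>(), align_of::<u32>())
}

fn main() {
    // Both forms name the same function, so the results are identical.
    assert_eq!(old_style(), new_style().0);
}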