Clippy use --all-features and various fixes
LDeakin committed Dec 22, 2023
1 parent 55c57f2 commit 01ccacc
Showing 19 changed files with 121 additions and 84 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
@@ -23,4 +23,4 @@ jobs:
       - run: cargo check # default features
       - run: cargo check --no-default-features
       - run: cargo fmt --all -- --check
-      - run: cargo clippy -- -D warnings
+      - run: cargo clippy --all-features -- -D warnings
6 changes: 3 additions & 3 deletions BUILD.md
@@ -10,15 +10,15 @@ cargo build --all-features && \
 cargo test --all-features && \
 cargo +nightly doc --all-features && \
 cargo fmt --all -- --check && \
-cargo clippy -- -D warnings && \
+cargo clippy --all-features -- -D warnings && \
 cargo check && \
 cargo check --no-default-features
 ```

 ```bash
 # Additional checks
-cargo clippy -- -D warnings -W clippy::nursery -A clippy::significant_drop_tightening -A clippy::significant_drop_in_scrutinee
-# cargo clippy -- -D warnings -W clippy::unwrap_used -W clippy::expect_used
+cargo clippy --all-features -- -D warnings -W clippy::nursery -A clippy::significant_drop_tightening -A clippy::significant_drop_in_scrutinee
+# cargo clippy --all-features -- -D warnings -W clippy::unwrap_used -W clippy::expect_used
 ```

 ## Performance
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -14,6 +14,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ### Changed
 - Revise code coverage section in `BUILD.md` to use `cargo-llvm-cov`
 - Increased code coverage in some modules
+- Add `--all-features` to clippy usage in `BUILD.md` and `ci.yml`
+- Fixed various clippy warnings

 ### Fixed
 - Fixed chunk key encoding for 0 dimensional arrays with `default` and `v2` encoding
56 changes: 36 additions & 20 deletions src/array/array_async.rs
@@ -99,7 +99,7 @@ impl<TStorage: ?Sized + AsyncReadableStorageTraits> Array<TStorage> {
     /// - `chunk_indices` are invalid,
     /// - there is a codec decoding error, or
     /// - an underlying store error.
-    pub async fn async_retrieve_chunk_elements<T: TriviallyTransmutable>(
+    pub async fn async_retrieve_chunk_elements<T: TriviallyTransmutable + Send + Sync>(
         &self,
         chunk_indices: &[u64],
     ) -> Result<Box<[T]>, ArrayError> {
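The recurring change in this file adds `Send`/`Sync` bounds to the generic element type `T` of the async methods. A reduced sketch of why — hypothetical code, not from zarrs itself: a future is `Send` only if everything it holds across an `.await` point is `Send`, and multithreaded executors only accept `Send` futures, so generic parameters that live inside these futures must carry the bound.

```rust
// Hypothetical reduction (not zarrs code): `retrieve`'s future holds `value`
// across an await point, so it is `Send` only when `T: Send`.
async fn retrieve<T: Send>(value: T) -> T {
    async {}.await; // `value` is live across this await point
    value
}

// Stand-in for the `Send + 'static` bounds of spawn APIs such as tokio's.
fn assert_spawnable<F: std::future::Future + Send + 'static>(f: F) -> F {
    f
}

fn main() {
    // Fine because `i32: Send`; a non-`Send` `T` (e.g. `Rc<u8>`) would be rejected.
    let _future = assert_spawnable(retrieve(42_i32));
}
```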
@@ -144,7 +144,9 @@ impl<TStorage: ?Sized + AsyncReadableStorageTraits> Array<TStorage> {
     ///
     /// # Panics
     /// Will panic if a chunk dimension is larger than `usize::MAX`.
-    pub async fn async_retrieve_chunk_ndarray<T: safe_transmute::TriviallyTransmutable>(
+    pub async fn async_retrieve_chunk_ndarray<
+        T: safe_transmute::TriviallyTransmutable + Send + Sync,
+    >(
         &self,
         chunk_indices: &[u64],
     ) -> Result<ndarray::ArrayD<T>, ArrayError> {
@@ -425,7 +427,7 @@ impl<TStorage: ?Sized + AsyncReadableStorageTraits> Array<TStorage> {
         self._async_retrieve_array_subset(array_subset, true).await
     }

-    async fn _async_retrieve_array_subset_elements<T: TriviallyTransmutable>(
+    async fn _async_retrieve_array_subset_elements<T: TriviallyTransmutable + Send + Sync>(
         &self,
         array_subset: &ArraySubset,
         parallel: bool,
@@ -469,7 +471,7 @@ impl<TStorage: ?Sized + AsyncReadableStorageTraits> Array<TStorage> {
     /// - an array subset is invalid or out of bounds of the array,
     /// - there is a codec decoding error, or
     /// - an underlying store error.
-    pub async fn async_retrieve_array_subset_elements<T: TriviallyTransmutable>(
+    pub async fn async_retrieve_array_subset_elements<T: TriviallyTransmutable + Send + Sync>(
         &self,
         array_subset: &ArraySubset,
     ) -> Result<Box<[T]>, ArrayError> {
@@ -479,7 +481,9 @@ impl<TStorage: ?Sized + AsyncReadableStorageTraits> Array<TStorage> {

     /// Parallel version of [`Array::retrieve_array_subset_elements`].
     #[allow(clippy::missing_panics_doc, clippy::missing_errors_doc)]
-    pub async fn async_par_retrieve_array_subset_elements<T: TriviallyTransmutable>(
+    pub async fn async_par_retrieve_array_subset_elements<
+        T: TriviallyTransmutable + Send + Sync,
+    >(
         &self,
         array_subset: &ArraySubset,
     ) -> Result<Box<[T]>, ArrayError> {
@@ -488,7 +492,9 @@ impl<TStorage: ?Sized + AsyncReadableStorageTraits> Array<TStorage> {
     }

     #[cfg(feature = "ndarray")]
-    async fn _async_retrieve_array_subset_ndarray<T: safe_transmute::TriviallyTransmutable>(
+    async fn _async_retrieve_array_subset_ndarray<
+        T: safe_transmute::TriviallyTransmutable + Send + Sync,
+    >(
         &self,
         array_subset: &ArraySubset,
         parallel: bool,
@@ -529,7 +535,9 @@ impl<TStorage: ?Sized + AsyncReadableStorageTraits> Array<TStorage> {
     ///
     /// # Panics
     /// Will panic if any dimension in `chunk_subset` is `usize::MAX` or larger.
-    pub async fn async_retrieve_array_subset_ndarray<T: safe_transmute::TriviallyTransmutable>(
+    pub async fn async_retrieve_array_subset_ndarray<
+        T: safe_transmute::TriviallyTransmutable + Send + Sync,
+    >(
         &self,
         array_subset: &ArraySubset,
     ) -> Result<ndarray::ArrayD<T>, ArrayError> {
@@ -541,7 +549,7 @@ impl<TStorage: ?Sized + AsyncReadableStorageTraits> Array<TStorage> {
     /// Parallel version of [`Array::retrieve_array_subset_ndarray`].
     #[allow(clippy::missing_panics_doc, clippy::missing_errors_doc)]
     pub async fn async_par_retrieve_array_subset_ndarray<
-        T: safe_transmute::TriviallyTransmutable,
+        T: safe_transmute::TriviallyTransmutable + Send + Sync,
     >(
         &self,
         array_subset: &ArraySubset,
@@ -610,7 +618,7 @@ impl<TStorage: ?Sized + AsyncReadableStorageTraits> Array<TStorage> {
     /// - the chunk subset is invalid,
     /// - there is a codec decoding error, or
     /// - an underlying store error.
-    pub async fn async_retrieve_chunk_subset_elements<T: TriviallyTransmutable>(
+    pub async fn async_retrieve_chunk_subset_elements<T: TriviallyTransmutable + Send + Sync>(
         &self,
         chunk_indices: &[u64],
         chunk_subset: &ArraySubset,
@@ -657,7 +665,7 @@ impl<TStorage: ?Sized + AsyncReadableStorageTraits> Array<TStorage> {
     ///
     /// # Panics
     /// Will panic if the number of elements in `chunk_subset` is `usize::MAX` or larger.
-    pub async fn async_retrieve_chunk_subset_ndarray<T: TriviallyTransmutable>(
+    pub async fn async_retrieve_chunk_subset_ndarray<T: TriviallyTransmutable + Send + Sync>(
         &self,
         chunk_indices: &[u64],
         chunk_subset: &ArraySubset,
@@ -803,7 +811,7 @@ impl<TStorage: ?Sized + AsyncWritableStorageTraits> Array<TStorage> {
     /// Returns an [`ArrayError`] if
     /// - the size of `T` does not match the data type size, or
     /// - a [`store_chunk`](Array::store_chunk) error condition is met.
-    pub async fn async_store_chunk_elements<T: TriviallyTransmutable>(
+    pub async fn async_store_chunk_elements<T: TriviallyTransmutable + Send>(
         &self,
         chunk_indices: &[u64],
         chunk_elements: Vec<T>,
@@ -827,7 +835,9 @@ impl<TStorage: ?Sized + AsyncWritableStorageTraits> Array<TStorage> {
     /// - the size of `T` does not match the size of the data type,
     /// - a [`store_chunk_elements`](Array::store_chunk_elements) error condition is met.
     #[allow(clippy::missing_panics_doc)]
-    pub async fn async_store_chunk_ndarray<T: safe_transmute::TriviallyTransmutable>(
+    pub async fn async_store_chunk_ndarray<
+        T: safe_transmute::TriviallyTransmutable + Send + Sync,
+    >(
         &self,
         chunk_indices: &[u64],
         chunk_array: &ndarray::ArrayViewD<'_, T>,
@@ -1050,7 +1060,7 @@ impl<TStorage: ?Sized + AsyncReadableStorageTraits + AsyncWritableStorageTraits>
             .await
     }

-    async fn _async_store_array_subset_elements<T: TriviallyTransmutable>(
+    async fn _async_store_array_subset_elements<T: TriviallyTransmutable + Send>(
         &self,
         array_subset: &ArraySubset,
         subset_elements: Vec<T>,
@@ -1076,7 +1086,7 @@ impl<TStorage: ?Sized + AsyncReadableStorageTraits + AsyncWritableStorageTraits>
     /// Returns an [`ArrayError`] if
     /// - the size of `T` does not match the data type size, or
     /// - a [`store_array_subset`](Array::store_array_subset) error condition is met.
-    pub async fn async_store_array_subset_elements<T: TriviallyTransmutable>(
+    pub async fn async_store_array_subset_elements<T: TriviallyTransmutable + Send>(
         &self,
         array_subset: &ArraySubset,
         subset_elements: Vec<T>,
@@ -1087,7 +1097,7 @@ impl<TStorage: ?Sized + AsyncReadableStorageTraits + AsyncWritableStorageTraits>

     /// Parallel version of [`Array::store_array_subset_elements`].
     #[allow(clippy::missing_panics_doc, clippy::missing_errors_doc)]
-    pub async fn async_par_store_array_subset_elements<T: TriviallyTransmutable>(
+    pub async fn async_par_store_array_subset_elements<T: TriviallyTransmutable + Send>(
         &self,
         array_subset: &ArraySubset,
         subset_elements: Vec<T>,
@@ -1097,7 +1107,9 @@ impl<TStorage: ?Sized + AsyncReadableStorageTraits + AsyncWritableStorageTraits>
     }

     #[cfg(feature = "ndarray")]
-    async fn _async_store_array_subset_ndarray<T: safe_transmute::TriviallyTransmutable>(
+    async fn _async_store_array_subset_ndarray<
+        T: safe_transmute::TriviallyTransmutable + Send + Sync,
+    >(
         &self,
         subset_start: &[u64],
         subset_array: &ndarray::ArrayViewD<'_, T>,
@@ -1135,7 +1147,9 @@ impl<TStorage: ?Sized + AsyncReadableStorageTraits + AsyncWritableStorageTraits>
     /// # Errors
     /// Returns an [`ArrayError`] if a [`store_array_subset_elements`](Array::store_array_subset_elements) error condition is met.
     #[allow(clippy::missing_panics_doc)]
-    pub async fn async_store_array_subset_ndarray<T: safe_transmute::TriviallyTransmutable>(
+    pub async fn async_store_array_subset_ndarray<
+        T: safe_transmute::TriviallyTransmutable + Send + Sync,
+    >(
         &self,
         subset_start: &[u64],
         subset_array: &ndarray::ArrayViewD<'_, T>,
@@ -1147,7 +1161,9 @@ impl<TStorage: ?Sized + AsyncReadableStorageTraits + AsyncWritableStorageTraits>
     #[cfg(feature = "ndarray")]
     /// Parallel version of [`Array::store_array_subset_ndarray`].
     #[allow(clippy::missing_panics_doc, clippy::missing_errors_doc)]
-    pub async fn async_par_store_array_subset_ndarray<T: safe_transmute::TriviallyTransmutable>(
+    pub async fn async_par_store_array_subset_ndarray<
+        T: safe_transmute::TriviallyTransmutable + Send + Sync,
+    >(
         &self,
         subset_start: &[u64],
         subset_array: &ndarray::ArrayViewD<'_, T>,
@@ -1240,7 +1256,7 @@ impl<TStorage: ?Sized + AsyncReadableStorageTraits + AsyncWritableStorageTraits>
     /// Returns an [`ArrayError`] if
     /// - the size of `T` does not match the data type size, or
     /// - a [`store_chunk_subset`](Array::store_chunk_subset) error condition is met.
-    pub async fn async_store_chunk_subset_elements<T: TriviallyTransmutable>(
+    pub async fn async_store_chunk_subset_elements<T: TriviallyTransmutable + Send + Sync>(
         &self,
         chunk_indices: &[u64],
         chunk_subset: &ArraySubset,
@@ -1266,7 +1282,7 @@ impl<TStorage: ?Sized + AsyncReadableStorageTraits + AsyncWritableStorageTraits>
     /// # Errors
     /// Returns an [`ArrayError`] if a [`store_chunk_subset_elements`](Array::store_chunk_subset_elements) error condition is met.
     #[allow(clippy::missing_panics_doc)]
-    pub async fn async_store_chunk_subset_ndarray<T: TriviallyTransmutable>(
+    pub async fn async_store_chunk_subset_ndarray<T: TriviallyTransmutable + Send + Sync>(
         &self,
         chunk_indices: &[u64],
         chunk_subset_start: &[u64],
2 changes: 1 addition & 1 deletion src/array/chunk_key_encoding.rs
@@ -53,7 +53,7 @@ where
     T: ChunkKeyEncodingTraits + 'static,
 {
     fn from(chunk_key_encoding: T) -> Self {
-        ChunkKeyEncoding::new(chunk_key_encoding)
+        Self::new(chunk_key_encoding)
     }
 }

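This one-liner is the fix for clippy's `use_self` lint (a nursery lint, presumably surfaced by the additional checks in `BUILD.md`): inside an `impl` block, `Self` replaces repeating the type name. A reduced sketch with a hypothetical type:

```rust
// Hypothetical reduction (not zarrs code) of the `use_self` fix above.
struct KeyEncoding(u32);

impl From<u32> for KeyEncoding {
    fn from(value: u32) -> Self {
        // Before the fix this read `KeyEncoding(value)`.
        Self(value)
    }
}

fn main() {
    let _encoding: KeyEncoding = 7_u32.into();
}
```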
24 changes: 9 additions & 15 deletions src/array/codec/array_to_array/bitround.rs
@@ -44,46 +44,40 @@ fn create_codec_bitround(metadata: &Metadata) -> Result<Codec, PluginCreateError
     Ok(Codec::ArrayToArray(codec))
 }

-fn round_bits16(mut input: u16, keepbits: u32, maxbits: u32) -> u16 {
-    if keepbits >= maxbits {
-        input
-    } else {
+const fn round_bits16(mut input: u16, keepbits: u32, maxbits: u32) -> u16 {
+    if keepbits < maxbits {
         let maskbits = maxbits - keepbits;
         let all_set = u16::MAX;
         let mask = (all_set >> maskbits) << maskbits;
         let half_quantum1 = (1 << (maskbits - 1)) - 1;
         input += ((input >> maskbits) & 1) + half_quantum1;
         input &= mask;
-        input
     }
+    input
 }

-fn round_bits32(mut input: u32, keepbits: u32, maxbits: u32) -> u32 {
-    if keepbits >= maxbits {
-        input
-    } else {
+const fn round_bits32(mut input: u32, keepbits: u32, maxbits: u32) -> u32 {
+    if keepbits < maxbits {
         let maskbits = maxbits - keepbits;
         let all_set = u32::MAX;
         let mask = (all_set >> maskbits) << maskbits;
         let half_quantum1 = (1 << (maskbits - 1)) - 1;
         input += ((input >> maskbits) & 1) + half_quantum1;
         input &= mask;
-        input
     }
+    input
 }

-fn round_bits64(mut input: u64, keepbits: u32, maxbits: u32) -> u64 {
-    if keepbits >= maxbits {
-        input
-    } else {
+const fn round_bits64(mut input: u64, keepbits: u32, maxbits: u32) -> u64 {
+    if keepbits < maxbits {
         let maskbits = maxbits - keepbits;
         let all_set = u64::MAX;
         let mask = (all_set >> maskbits) << maskbits;
         let half_quantum1 = (1 << (maskbits - 1)) - 1;
         input += ((input >> maskbits) & 1) + half_quantum1;
         input &= mask;
-        input
     }
+    input
 }

 fn round_bytes(bytes: &mut [u8], data_type: &DataType, keepbits: u32) -> Result<(), CodecError> {
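To make the rounding helpers concrete, here is a self-contained check: the body of `round_bits32` is copied from the new code above, while `main` and its values are illustrative only. Keeping 4 of an `f32`'s 23 mantissa bits quantizes π to 3.125; the `half_quantum1` addition implements round-to-nearest with ties to even.

```rust
// `round_bits32` as introduced in this commit.
const fn round_bits32(mut input: u32, keepbits: u32, maxbits: u32) -> u32 {
    if keepbits < maxbits {
        let maskbits = maxbits - keepbits;
        let all_set = u32::MAX;
        let mask = (all_set >> maskbits) << maskbits;
        let half_quantum1 = (1 << (maskbits - 1)) - 1;
        input += ((input >> maskbits) & 1) + half_quantum1;
        input &= mask;
    }
    input
}

// Illustrative usage: round an f32 mantissa via its bit pattern.
fn main() {
    let x = std::f32::consts::PI;
    let rounded = f32::from_bits(round_bits32(x.to_bits(), 4, 23));
    println!("{x} -> {rounded}"); // prints: 3.1415927 -> 3.125
}
```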
4 changes: 2 additions & 2 deletions src/array/codec/array_to_array/bitround/bitround_codec.rs
@@ -22,13 +22,13 @@ impl BitroundCodec {
     ///
     /// `keepbits` is the number of bits to round to in the floating point mantissa.
     #[must_use]
-    pub fn new(keepbits: u32) -> Self {
+    pub const fn new(keepbits: u32) -> Self {
         Self { keepbits }
     }

     /// Create a new `bitround` codec from a configuration.
     #[must_use]
-    pub fn new_with_configuration(configuration: &BitroundCodecConfiguration) -> Self {
+    pub const fn new_with_configuration(configuration: &BitroundCodecConfiguration) -> Self {
         let BitroundCodecConfiguration::V1(configuration) = configuration;
         Self {
             keepbits: configuration.keepbits,
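Both constructors become `const fn`, the change clippy's `missing_const_for_fn` lint (nursery, enabled by the `BUILD.md` additional checks) suggests when a function body is already valid in a const context. A hypothetical reduction of what that enables:

```rust
// Hypothetical reduction (not zarrs code): a `const` constructor can be
// evaluated at compile time, e.g. to initialize a `const` item.
struct Codec {
    keepbits: u32,
}

impl Codec {
    const fn new(keepbits: u32) -> Self {
        Self { keepbits }
    }
}

// Legal only because `Codec::new` is a `const fn`.
const DEFAULT_CODEC: Codec = Codec::new(10);

fn main() {
    assert_eq!(DEFAULT_CODEC.keepbits, 10);
}
```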
@@ -617,7 +617,7 @@ impl AsyncArrayPartialDecoderTraits for AsyncShardingPartialDecoder<'_> {
                     &chunk_representation,
                 )
                 .await?;
-            let overlap = unsafe { array_subset.overlap_unchecked(&chunk_subset) };
+            let overlap = unsafe { array_subset.overlap_unchecked(chunk_subset) };
             let array_subset_in_chunk_subset =
                 unsafe { overlap.relative_to_unchecked(chunk_subset.start()) };
             // Partial decoding is actually really slow with the blosc codec! Assume sharded chunks are small, and just decode the whole thing and extract bytes
@@ -682,7 +682,7 @@

         // Write filled chunks
         filled_chunks.par_iter().for_each(|chunk_subset| {
-            let overlap = unsafe { array_subset.overlap_unchecked(&chunk_subset) };
+            let overlap = unsafe { array_subset.overlap_unchecked(chunk_subset) };
             let chunk_subset_in_array_subset =
                 unsafe { overlap.relative_to_unchecked(array_subset.start()) };
             let mut data_idx = 0;
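Both `&chunk_subset` → `chunk_subset` changes drop a redundant borrow: `par_iter()` already yields references, so `&chunk_subset` is a double reference that the compiler dereferences again — the pattern flagged by `clippy::needless_borrow`. A reduced sketch using plain `iter()` and hypothetical types:

```rust
// Hypothetical reduction (not zarrs code) of the needless-borrow fix.
fn overlap_len(subset: &[u64]) -> usize {
    subset.len()
}

fn main() {
    let chunk_subsets: Vec<Vec<u64>> = vec![vec![0, 1], vec![2, 3, 4]];
    let total: usize = chunk_subsets
        .iter() // yields `&Vec<u64>`, so no extra `&` is needed below
        .map(|chunk_subset| overlap_len(chunk_subset))
        .sum();
    assert_eq!(total, 5);
}
```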
2 changes: 1 addition & 1 deletion src/array/codec/array_to_bytes/zfp.rs
@@ -73,7 +73,7 @@ pub struct ZfpExpertParams {
     pub minexp: i32,
 }

-fn zarr_data_type_to_zfp_data_type(data_type: &DataType) -> Option<zfp_type> {
+const fn zarr_data_type_to_zfp_data_type(data_type: &DataType) -> Option<zfp_type> {
     match data_type {
         DataType::Int32 | DataType::UInt32 => Some(zfp_type_zfp_type_int32),
         DataType::Int64 | DataType::UInt64 => Some(zfp_type_zfp_type_int64),
2 changes: 1 addition & 1 deletion src/array/codec/array_to_bytes/zfp/zfp_bitstream.rs
@@ -20,7 +20,7 @@ impl ZfpBitstream {
         NonNull::new(stream).map(Self)
     }

-    pub fn as_bitstream(&self) -> *mut bitstream {
+    pub const fn as_bitstream(&self) -> *mut bitstream {
         self.0.as_ptr()
     }
 }