Skip to content

Commit

Permalink
chore: remove some allow(dead_code) annotations and associated dead code (#4472)

Browse files Browse the repository at this point in the history

We have quite a few `allow(dead_code)` annotations. While it's OK to use
them in situations where the Cargo-feature combination explodes and makes it
hard to reason about when something is actually used or not, in other
situations it can be avoided, and doing so can reveal actual dead code.
  • Loading branch information
bnjbvr authored Jan 8, 2025
1 parent 47c24b9 commit aca8c8b
Show file tree
Hide file tree
Showing 10 changed files with 14 additions and 84 deletions.
4 changes: 1 addition & 3 deletions crates/matrix-sdk-common/src/linked_chunk/builder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,6 @@ use super::{
/// which will get resolved later when re-building the full data structure. This
/// allows using chunks that reference other chunks that aren't known yet.
struct TemporaryChunk<Item, Gap> {
id: ChunkIdentifier,
previous: Option<ChunkIdentifier>,
next: Option<ChunkIdentifier>,
content: ChunkContent<Item, Gap>,
Expand Down Expand Up @@ -79,7 +78,7 @@ impl<const CAP: usize, Item, Gap> LinkedChunkBuilder<CAP, Item, Gap> {
next: Option<ChunkIdentifier>,
content: Gap,
) {
let chunk = TemporaryChunk { id, previous, next, content: ChunkContent::Gap(content) };
let chunk = TemporaryChunk { previous, next, content: ChunkContent::Gap(content) };
self.chunks.insert(id, chunk);
}

Expand All @@ -96,7 +95,6 @@ impl<const CAP: usize, Item, Gap> LinkedChunkBuilder<CAP, Item, Gap> {
items: impl IntoIterator<Item = Item>,
) {
let chunk = TemporaryChunk {
id,
previous,
next,
content: ChunkContent::Items(items.into_iter().collect()),
Expand Down
1 change: 0 additions & 1 deletion crates/matrix-sdk-common/src/linked_chunk/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#![allow(dead_code)]
#![allow(rustdoc::private_intra_doc_links)]

//! A linked chunk is the underlying data structure that holds all events.
Expand Down
3 changes: 3 additions & 0 deletions crates/matrix-sdk-common/src/linked_chunk/updates.rs
Original file line number Diff line number Diff line change
Expand Up @@ -138,6 +138,7 @@ impl<Item, Gap> ObservableUpdates<Item, Gap> {
}

/// Subscribe to updates by using a [`Stream`].
#[cfg(test)]
pub(super) fn subscribe(&mut self) -> UpdatesSubscriber<Item, Gap> {
// A subscriber is a new update reader, it needs its own token.
let token = self.new_reader_token();
Expand Down Expand Up @@ -264,6 +265,7 @@ impl<Item, Gap> UpdatesInner<Item, Gap> {
}

/// Return the number of updates in the buffer.
#[cfg(test)]
fn len(&self) -> usize {
self.updates.len()
}
Expand Down Expand Up @@ -302,6 +304,7 @@ pub(super) struct UpdatesSubscriber<Item, Gap> {

impl<Item, Gap> UpdatesSubscriber<Item, Gap> {
/// Create a new [`Self`].
#[cfg(test)]
fn new(updates: Weak<RwLock<UpdatesInner<Item, Gap>>>, token: ReaderToken) -> Self {
Self { updates, token }
}
Expand Down
21 changes: 0 additions & 21 deletions crates/matrix-sdk-crypto/src/olm/group_sessions/inbound.rs
Original file line number Diff line number Diff line change
Expand Up @@ -221,27 +221,6 @@ impl InboundGroupSession {
Self::try_from(exported_session)
}

#[allow(dead_code)]
fn from_backup(
room_id: &RoomId,
backup: BackedUpRoomKey,
) -> Result<Self, SessionCreationError> {
// We're using this session only to get the session id, the session
// config doesn't matter here.
let session = InnerSession::import(&backup.session_key, SessionConfig::default());
let session_id = session.session_id();

Self::from_export(&ExportedRoomKey {
algorithm: backup.algorithm,
room_id: room_id.to_owned(),
sender_key: backup.sender_key,
session_id,
forwarding_curve25519_key_chain: vec![],
session_key: backup.session_key,
sender_claimed_keys: backup.sender_claimed_keys,
})
}

/// Store the group session as a base64 encoded string.
///
/// # Arguments
Expand Down
18 changes: 0 additions & 18 deletions crates/matrix-sdk-indexeddb/src/safe_encode.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
//! Helpers for wasm32/browser environments
#![allow(dead_code)]
use base64::{
alphabet,
engine::{general_purpose, GeneralPurpose},
Expand Down Expand Up @@ -51,23 +50,6 @@ pub trait SafeEncode {
.encode(store_cipher.hash_key(table_name, self.as_encoded_string().as_bytes()))
}

/// encode self into a JsValue, internally using `as_encoded_string`
/// to escape the value of self, and append the given counter
fn encode_with_counter(&self, i: usize) -> JsValue {
format!("{}{KEY_SEPARATOR}{i:016x}", self.as_encoded_string()).into()
}

/// encode self into a JsValue, internally using `as_secure_string`
/// to escape the value of self, and append the given counter
fn encode_with_counter_secure(
&self,
table_name: &str,
store_cipher: &StoreCipher,
i: usize,
) -> JsValue {
format!("{}{KEY_SEPARATOR}{i:016x}", self.as_secure_string(table_name, store_cipher)).into()
}

/// Encode self into a IdbKeyRange for searching all keys that are
/// prefixed with this key, followed by `KEY_SEPARATOR`. Internally
/// uses `as_encoded_string` to ensure the given key is escaped properly.
Expand Down
1 change: 0 additions & 1 deletion crates/matrix-sdk-indexeddb/src/state_store/migrations.rs
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,6 @@ const CURRENT_META_DB_VERSION: u32 = 2;

/// Sometimes Migrations can't proceed without having to drop existing
/// data. This allows you to configure, how these cases should be handled.
#[allow(dead_code)]
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum MigrationConflictStrategy {
/// Just drop the data, we don't care that we have to sync again
Expand Down
7 changes: 3 additions & 4 deletions crates/matrix-sdk-sqlite/src/crypto_store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1420,8 +1420,7 @@ mod tests {
struct TestDb {
// Needs to be kept alive because the Drop implementation for TempDir deletes the
// directory.
#[allow(dead_code)]
dir: TempDir,
_dir: TempDir,
database: SqliteCryptoStore,
}

Expand All @@ -1440,14 +1439,14 @@ mod tests {
let database =
SqliteCryptoStore::open(tmpdir.path(), None).await.expect("Can't open the test store");

TestDb { dir: tmpdir, database }
TestDb { _dir: tmpdir, database }
}

/// Test that we didn't regress in our storage layer by loading data from a
/// pre-filled database, or in other words use a test vector for this.
#[async_test]
async fn test_open_test_vector_store() {
let TestDb { dir: _, database } = get_test_db().await;
let TestDb { _dir: _, database } = get_test_db().await;

let account = database
.load_account()
Expand Down
24 changes: 0 additions & 24 deletions crates/matrix-sdk-sqlite/src/event_cache_store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,6 @@

//! A sqlite-based backend for the [`EventCacheStore`].
#![allow(dead_code)] // Most of the unused code may be used soonish.

use std::{borrow::Cow, fmt, path::Path, sync::Arc};

use async_trait::async_trait;
Expand Down Expand Up @@ -143,28 +141,6 @@ impl SqliteEventCacheStore {
row.get::<_, String>(3)?,
))
}

async fn load_chunk_with_id(
&self,
room_id: &RoomId,
chunk_id: ChunkIdentifier,
) -> Result<RawChunk<Event, Gap>> {
let hashed_room_id = self.encode_key(keys::LINKED_CHUNKS, room_id);

let this = self.clone();

self
.acquire()
.await?
.with_transaction(move |txn| -> Result<_> {
let (id, previous, next, chunk_type) = txn.query_row(
"SELECT id, previous, next, type FROM linked_chunks WHERE room_id = ? AND chunk_id = ?",
(&hashed_room_id, chunk_id.index()),
Self::map_row_to_chunk
)?;
txn.rebuild_chunk(&this, &hashed_room_id, previous, id, next, chunk_type.as_str())
}).await
}
}

trait TransactionExtForLinkedChunks {
Expand Down
16 changes: 7 additions & 9 deletions crates/matrix-sdk/src/sliding_sync/list/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -204,20 +204,18 @@ impl SlidingSyncList {
pub(super) fn invalidate_sticky_data(&self) {
let _ = self.inner.sticky.write().unwrap().data_mut();
}
}

#[cfg(any(test, feature = "testing"))]
#[allow(dead_code)]
impl SlidingSyncList {
/// Set the maximum number of rooms.
pub(super) fn set_maximum_number_of_rooms(&self, maximum_number_of_rooms: Option<u32>) {
self.inner.maximum_number_of_rooms.set(maximum_number_of_rooms);
}

/// Get the sync-mode.
#[cfg(feature = "testing")]
pub fn sync_mode(&self) -> SlidingSyncMode {
self.inner.sync_mode.read().unwrap().clone()
}

/// Set the maximum number of rooms.
#[cfg(test)]
pub(super) fn set_maximum_number_of_rooms(&self, maximum_number_of_rooms: Option<u32>) {
self.inner.maximum_number_of_rooms.set(maximum_number_of_rooms);
}
}

#[derive(Debug)]
Expand Down
3 changes: 0 additions & 3 deletions testing/matrix-sdk-test/src/test_json/keys_query_sets.rs
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,6 @@ use crate::{
/// devices are properly signed by `@good` (i.e. were self-verified by @good)
pub struct KeyDistributionTestData {}

#[allow(dead_code)]
impl KeyDistributionTestData {
pub const MASTER_KEY_PRIVATE_EXPORT: &'static str =
"9kquJqAtEUoTXljh5W2QSsCm4FH9WvWzIkDkIMUsM2k";
Expand Down Expand Up @@ -529,7 +528,6 @@ impl IdentityChangeDataSet {
/// The `/keys/query` responses were generated using a local synapse.
pub struct VerificationViolationTestData {}

#[allow(dead_code)]
impl VerificationViolationTestData {
/// Secret part of Alice's master cross-signing key.
///
Expand Down Expand Up @@ -1130,7 +1128,6 @@ impl VerificationViolationTestData {
/// For user @malo, that performed an identity change with the same device.
pub struct MaloIdentityChangeDataSet {}

#[allow(dead_code)]
impl MaloIdentityChangeDataSet {
pub fn user_id() -> &'static UserId {
user_id!("@malo:localhost")
Expand Down

0 comments on commit aca8c8b

Please sign in to comment.