Skip to content

Commit 85b3c2e

Browse files
authored
Messages v2 included migration (#1738)
# Goal The goal of this PR is to propose and implement messages v2 compatible with PoV Closes #198 # Discussion - Refactored Messages to minimize used PoV - Added storage migration (single block) # Migration Details - Based on data used in rococo and main-net and our calculations, we don't need to do a multi-block migration (only around 15% of the block is being used). - Was not able to test with upgrading on local due to getting errors when running relay nodes - Was able to successfully run the try-runtime CLI tool against rococo # Checklist - [x] Chain spec updated - [x] Design doc(s) updated - [x] Tests added - [x] Benchmarks added - [x] Weights updated
1 parent 8f55a11 commit 85b3c2e

File tree

17 files changed

+407
-242
lines changed

17 files changed

+407
-242
lines changed

Cargo.lock

Lines changed: 1 addition & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

designdocs/message_storage_v2.md

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
# On Chain Message Storage
2+
3+
## Context and Scope
4+
The proposed feature consists of changes that are going to be one (or more) pallet(s) in the runtime of a
5+
Substrate based blockchain, and it will be used in all environments including production.
6+
7+
## Problem Statement
8+
After the introduction of **Proof of Validity** or **PoV** in runtime weights, all pallets should be
9+
re-evaluated and refactored if necessary to minimize the usage of **PoV**. This is to ensure all
10+
important operations are scalable.
11+
This document proposes changes to the **Messages** pallet to optimize the **PoV** size.
12+
13+
## Goals
14+
- Minimizing Weights including **execution times** and **PoV** size.
15+
16+
## Proposal
17+
Storing messages on chain using **BlockNumber**, **SchemaId**, and **MessageIndex** as primary, secondary,
18+
and tertiary keys using [StorageNMap](https://paritytech.github.io/substrate/master/frame_support/storage/trait.StorageNMap.html) data structure provided in Substrate.
19+
20+
### Main Storage types
21+
- **MessagesV2**
22+
- _Type_: `StorageNMap<(BlockNumber, SchemaId, MessageIndex), Message>`
23+
- _Purpose_: Main structure to store all messages for a certain block number, schema id, and
24+
index
25+
26+
27+
### On Chain Structure
28+
Following is a proposed data structure for storing a Message on chain.
29+
```rust
30+
/// only `index` is removed from old structure
31+
pub struct Message<AccountId> {
32+
pub payload: Vec<u8>, // Serialized data in a user-defined schemas format
33+
pub provider_key: AccountId, // Public key of the provider (the signer)
34+
pub msa_id: u64, // Message source account id (the original source of the message)
35+
}
36+
```
37+
## Description
38+
39+
The idea is to use the existing **whitelisted** storage with the `BlockMessageIndex` type to store and get
40+
the index of each message to be able to use it as our third key for `StorageNMap`.
41+
42+
We would store each message separately into `StorageNMap` with the following keys:
43+
- primary key would be `block_number`
44+
- secondary key would be `schema_id`
45+
- tertiary key would be the `index` of the message for the current block, which starts from 0
46+
47+

e2e/capacity/transactions.test.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -218,7 +218,7 @@ describe('Capacity Transactions', function () {
218218

219219
const { eventMap } = await call.payWithCapacity();
220220
assertEvent(eventMap, 'capacity.CapacityWithdrawn');
221-
assertEvent(eventMap, 'messages.MessagesStored');
221+
assertEvent(eventMap, 'messages.MessagesInBlock');
222222
});
223223

224224
it('successfully pays with Capacity for eligible transaction - addOnchainMessage', async function () {
@@ -227,7 +227,7 @@ describe('Capacity Transactions', function () {
227227
const call = ExtrinsicHelper.addOnChainMessage(capacityKeys, dummySchemaId, '0xdeadbeef');
228228
const { eventMap } = await call.payWithCapacity();
229229
assertEvent(eventMap, 'capacity.CapacityWithdrawn');
230-
assertEvent(eventMap, 'messages.MessagesStored');
230+
assertEvent(eventMap, 'messages.MessagesInBlock');
231231
const get = await ExtrinsicHelper.apiPromise.rpc.messages.getBySchemaId(dummySchemaId, {
232232
from_block: starting_block,
233233
from_index: 0,

e2e/messages/addIPFSMessage.test.ts

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -107,9 +107,7 @@ describe('Add Offchain Message', function () {
107107
const f = ExtrinsicHelper.addIPFSMessage(keys, schemaId, ipfs_cid_64, ipfs_payload_len);
108108
const { target: event } = await f.fundAndSend(fundingSource);
109109

110-
assert.notEqual(event, undefined, 'should have returned a MessagesStored event');
111-
assert.deepEqual(event?.data.schemaId, schemaId, 'schema ids should be equal');
112-
assert.notEqual(event?.data.blockNumber, undefined, 'should have a block number');
110+
assert.notEqual(event, undefined, 'should have returned a MessagesInBlock event');
113111
});
114112

115113
it('should successfully retrieve added message and returned CID should have Base32 encoding', async function () {
@@ -130,9 +128,7 @@ describe('Add Offchain Message', function () {
130128
const f = ExtrinsicHelper.addOnChainMessage(keys, dummySchemaId, '0xdeadbeef');
131129
const { target: event } = await f.fundAndSend(fundingSource);
132130

133-
assert.notEqual(event, undefined, 'should have returned a MessagesStored event');
134-
assert.deepEqual(event?.data.schemaId, dummySchemaId, 'schema ids should be equal');
135-
assert.notEqual(event?.data.blockNumber, undefined, 'should have a block number');
131+
assert.notEqual(event, undefined, 'should have returned a MessagesInBlock event');
136132

137133
const get = await ExtrinsicHelper.apiPromise.rpc.messages.getBySchemaId(dummySchemaId, {
138134
from_block: starting_block,

e2e/package-lock.json

Lines changed: 1 addition & 1 deletion
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

e2e/scaffolding/extrinsicHelpers.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -483,7 +483,7 @@ export class ExtrinsicHelper {
483483
return new Extrinsic(
484484
() => ExtrinsicHelper.api.tx.messages.addIpfsMessage(schemaId, cid, payload_length),
485485
keys,
486-
ExtrinsicHelper.api.events.messages.MessagesStored
486+
ExtrinsicHelper.api.events.messages.MessagesInBlock
487487
);
488488
}
489489

@@ -668,7 +668,7 @@ export class ExtrinsicHelper {
668668
return new Extrinsic(
669669
() => ExtrinsicHelper.api.tx.messages.addOnchainMessage(null, schemaId, payload),
670670
keys,
671-
ExtrinsicHelper.api.events.messages.MessagesStored
671+
ExtrinsicHelper.api.events.messages.MessagesInBlock
672672
);
673673
}
674674

node/cli/Cargo.toml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@ cli-opt = { default-features = false, path = "../cli-opt" }
2929

3030
# Substrate
3131
frame-benchmarking-cli = { git = "https://github.com/paritytech/polkadot-sdk", optional = true, branch = "release-polkadot-v1.1.0" }
32+
frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", optional = true, branch = "release-polkadot-v1.1.0" }
3233
frame-support = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" }
3334
frame-system = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" }
3435
pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" }
@@ -70,6 +71,7 @@ cli = [
7071
"sc-cli",
7172
"sc-service",
7273
"frame-benchmarking-cli",
74+
"frame-benchmarking",
7375
"try-runtime-cli"
7476
]
7577
default = ["std", "cli"]

node/cli/src/command.rs

Lines changed: 17 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -371,22 +371,24 @@ pub fn run() -> Result<()> {
371371

372372
#[cfg(feature = "try-runtime")]
373373
Some(Subcommand::TryRuntime(cmd)) => {
374-
use sc_executor::{sp_wasm_interface::ExtendedHostFunctions, NativeExecutionDispatch};
374+
use common_runtime::constants::MILLISECS_PER_BLOCK;
375+
use try_runtime_cli::block_building_info::timestamp_with_aura_info;
376+
375377
let runner = cli.create_runner(cmd)?;
376-
runner.async_run(|config| {
377-
// we don't need any of the components of new_partial, just a runtime, or a task
378-
// manager to do `async_run`.
379-
let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry);
380-
let task_manager =
381-
sc_service::TaskManager::new(config.tokio_handle.clone(), registry)
382-
.map_err(|e| sc_cli::Error::Service(sc_service::Error::Prometheus(e)))?;
383-
Ok((
384-
cmd.run::<Block, ExtendedHostFunctions<
385-
sp_io::SubstrateHostFunctions,
386-
<ExecutorDispatch as NativeExecutionDispatch>::ExtendHostFunctions,
387-
>>(),
388-
task_manager,
389-
))
378+
379+
type HostFunctions =
380+
(sp_io::SubstrateHostFunctions, frame_benchmarking::benchmarking::HostFunctions);
381+
382+
// grab the task manager.
383+
let registry = &runner.config().prometheus_config.as_ref().map(|cfg| &cfg.registry);
384+
let task_manager =
385+
sc_service::TaskManager::new(runner.config().tokio_handle.clone(), *registry)
386+
.map_err(|e| format!("Error: {:?}", e))?;
387+
388+
let info_provider = timestamp_with_aura_info(MILLISECS_PER_BLOCK);
389+
390+
runner.async_run(|_| {
391+
Ok((cmd.run::<Block, HostFunctions, _>(Some(info_provider)), task_manager))
390392
})
391393
},
392394
Some(Subcommand::ExportRuntimeVersion(cmd)) => {

pallets/messages/src/benchmarking.rs

Lines changed: 9 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ use sp_runtime::traits::One;
1515

1616
const IPFS_SCHEMA_ID: u16 = 50;
1717
const IPFS_PAYLOAD_LENGTH: u32 = 10;
18+
const MAX_MESSAGES_IN_BLOCK: u32 = 500;
1819

1920
fn onchain_message<T: Config>(schema_id: SchemaId) -> DispatchResult {
2021
let message_source_id = DelegatorId(1);
@@ -62,8 +63,6 @@ fn create_schema<T: Config>(location: PayloadLocation) -> DispatchResult {
6263
}
6364

6465
benchmarks! {
65-
// this is temporary to avoid massive PoV sizes which will break the chain until rework on messages
66-
#[pov_mode = Measured]
6766
add_onchain_message {
6867
let n in 0 .. T::MessagesMaxPayloadSizeBytes::get() - 1;
6968
let message_source_id = DelegatorId(2);
@@ -78,21 +77,17 @@ benchmarks! {
7877
assert_ok!(T::MsaBenchmarkHelper::set_delegation_relationship(ProviderId(1), message_source_id.into(), [schema_id].to_vec()));
7978

8079
let payload = vec![1; n as usize];
81-
let average_messages_per_block: u32 = T::MaxMessagesPerBlock::get() / 2;
82-
for j in 1 .. average_messages_per_block {
80+
for j in 1 .. MAX_MESSAGES_IN_BLOCK {
8381
assert_ok!(onchain_message::<T>(schema_id));
8482
}
8583
}: _ (RawOrigin::Signed(caller), Some(message_source_id.into()), schema_id, payload)
8684
verify {
87-
assert_eq!(
88-
MessagesPallet::<T>::get_messages(
89-
BlockNumberFor::<T>::one(), schema_id).len(),
90-
average_messages_per_block as usize
85+
assert_eq!(MessagesPallet::<T>::get_messages_by_schema_and_block(
86+
schema_id, PayloadLocation::OnChain, BlockNumberFor::<T>::one()).len(),
87+
MAX_MESSAGES_IN_BLOCK as usize
9188
);
9289
}
9390

94-
// this is temporary to avoid massive PoV sizes which will break the chain until rework on messages
95-
#[pov_mode = Measured]
9691
add_ipfs_message {
9792
let caller: T::AccountId = whitelisted_caller();
9893
let cid = "bafkreidgvpkjawlxz6sffxzwgooowe5yt7i6wsyg236mfoks77nywkptdq".as_bytes().to_vec();
@@ -102,16 +97,14 @@ benchmarks! {
10297
assert_ok!(create_schema::<T>(PayloadLocation::IPFS));
10398
}
10499
assert_ok!(T::MsaBenchmarkHelper::add_key(ProviderId(1).into(), caller.clone()));
105-
let average_messages_per_block: u32 = T::MaxMessagesPerBlock::get() / 2;
106-
for j in 1 .. average_messages_per_block {
100+
for j in 1 .. MAX_MESSAGES_IN_BLOCK {
107101
assert_ok!(ipfs_message::<T>(IPFS_SCHEMA_ID));
108102
}
109103
}: _ (RawOrigin::Signed(caller),IPFS_SCHEMA_ID, cid, IPFS_PAYLOAD_LENGTH)
110104
verify {
111-
assert_eq!(
112-
MessagesPallet::<T>::get_messages(
113-
BlockNumberFor::<T>::one(), IPFS_SCHEMA_ID).len(),
114-
average_messages_per_block as usize
105+
assert_eq!(MessagesPallet::<T>::get_messages_by_schema_and_block(
106+
IPFS_SCHEMA_ID, PayloadLocation::IPFS, BlockNumberFor::<T>::one()).len(),
107+
MAX_MESSAGES_IN_BLOCK as usize
115108
);
116109
}
117110

0 commit comments

Comments
 (0)