Merge remote-tracking branch 'origin/master' into agustin-starlight-inflation-rewards
Agusrodri committed Sep 18, 2024
2 parents 4ab6e9c + 36de5a2 commit 3d57f42
Showing 23 changed files with 2,650 additions and 1,157 deletions.
2 changes: 2 additions & 0 deletions node/src/chain_spec/dancebox.rs
@@ -97,6 +97,7 @@ pub fn development_config(
collators_per_parathread: 1,
parathreads_per_collator: 1,
target_container_chain_fullness: Perbill::from_percent(80),
max_parachain_cores_percentage: None,
},
..Default::default()
},
@@ -159,6 +160,7 @@ pub fn local_dancebox_config(
collators_per_parathread: 1,
parathreads_per_collator: 1,
target_container_chain_fullness: Perbill::from_percent(80),
max_parachain_cores_percentage: None,
},
..Default::default()
},
2 changes: 2 additions & 0 deletions node/src/chain_spec/flashbox.rs
@@ -97,6 +97,7 @@ pub fn development_config(
collators_per_parathread: 1,
parathreads_per_collator: 1,
target_container_chain_fullness: Perbill::from_percent(80),
max_parachain_cores_percentage: None,
},
..Default::default()
},
@@ -159,6 +160,7 @@ pub fn local_flashbox_config(
collators_per_parathread: 1,
parathreads_per_collator: 1,
target_container_chain_fullness: Perbill::from_percent(80),
max_parachain_cores_percentage: None,
},
..Default::default()
},
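Both the development and local chain specs leave the new field unset, which applies no cap on parachain cores. Purely as an illustration, and assuming the field is an Option<Perbill> as the mock host configuration later in this diff suggests, a spec that wanted to keep 40% of cores free for parathreads might instead set:

// Hypothetical value, for illustration only: allow parachains on at most 60% of cores.
max_parachain_cores_percentage: Some(Perbill::from_percent(60)),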
176 changes: 149 additions & 27 deletions pallets/collator-assignment/src/lib.rs
@@ -43,6 +43,7 @@

use {
crate::assignment::{Assignment, ChainNumCollators},
core::ops::Mul,
frame_support::{pallet_prelude::*, traits::Currency},
frame_system::pallet_prelude::BlockNumberFor,
rand::{seq::SliceRandom, SeedableRng},
@@ -72,6 +73,12 @@ mod mock;
#[cfg(test)]
mod tests;

#[derive(Encode, Decode, Debug, TypeInfo)]
pub struct CoreAllocationConfiguration {
pub core_count: u32,
pub max_parachain_percentage: Perbill,
}

#[frame_support::pallet]
pub mod pallet {
use super::*;
@@ -103,6 +110,7 @@ pub mod pallet {
type Currency: Currency<Self::AccountId>;
type CollatorAssignmentTip: CollatorAssignmentTip<BalanceOf<Self>>;
type ForceEmptyOrchestrator: Get<bool>;
type CoreAllocationConfiguration: Get<Option<CoreAllocationConfiguration>>;
/// The weight information of this pallet.
type WeightInfo: WeightInfo;
}
@@ -159,16 +167,130 @@ pub mod pallet {
}

impl<T: Config> Pallet<T> {
pub(crate) fn enough_collators_for_all_chains(
bulk_paras: &Vec<ChainNumCollators>,
pool_paras: &Vec<ChainNumCollators>,
target_session_index: T::SessionIndex,
number_of_collators: u32,
collators_per_container: u32,
collators_per_parathread: u32,
) -> bool {
number_of_collators
>= T::HostConfiguration::min_collators_for_orchestrator(target_session_index)
.saturating_add(collators_per_container.saturating_mul(bulk_paras.len() as u32))
.saturating_add(
collators_per_parathread.saturating_mul(pool_paras.len() as u32),
)
}

/// Takes the bulk paras (parachains) and pool paras (parathreads) and checks:
/// a) do we have enough collators? b) do we have enough cores?
/// If either answer is no, bulk_paras and pool_paras are each sorted by tip
/// before the two vectors are appended.
pub(crate) fn order_paras_with_core_config(
mut bulk_paras: Vec<ChainNumCollators>,
mut pool_paras: Vec<ChainNumCollators>,
core_allocation_configuration: &CoreAllocationConfiguration,
target_session_index: T::SessionIndex,
number_of_collators: u32,
collators_per_container: u32,
collators_per_parathread: u32,
) -> (Vec<ChainNumCollators>, bool) {
let core_count = core_allocation_configuration.core_count;
let max_number_of_bulk_paras = core_allocation_configuration
.max_parachain_percentage
.mul(core_count);

let enough_cores_for_bulk_paras = bulk_paras.len() <= max_number_of_bulk_paras as usize;

let enough_collators = Self::enough_collators_for_all_chains(
&bulk_paras,
&pool_paras,
target_session_index,
number_of_collators,
collators_per_container,
collators_per_parathread,
);

// We should charge a tip if parachain demand exceeds `max_number_of_bulk_paras` OR
// if `number_of_collators` is not enough to satisfy the collation needs of all paras.
let should_charge_tip = !enough_cores_for_bulk_paras || !enough_collators;

// Currently we sort both bulk and pool paras by tip, even when, for example, only
// the number of bulk paras is restricted by core availability, since we deduct the
// tip from all paras.
// We need to sort them separately because parachain space is fixed at the moment,
// which means that even when some parathread cores are empty we cannot schedule a
// parachain there.
if should_charge_tip {
bulk_paras.sort_by(|a, b| {
T::CollatorAssignmentTip::get_para_tip(b.para_id)
.cmp(&T::CollatorAssignmentTip::get_para_tip(a.para_id))
});

pool_paras.sort_by(|a, b| {
T::CollatorAssignmentTip::get_para_tip(b.para_id)
.cmp(&T::CollatorAssignmentTip::get_para_tip(a.para_id))
});
}

bulk_paras.truncate(max_number_of_bulk_paras as usize);
// We do not truncate pool paras, since their workload is not continuous: one core
// can be shared by many paras during the session.

let chains: Vec<_> = bulk_paras.into_iter().chain(pool_paras).collect();

(chains, should_charge_tip)
}

pub(crate) fn order_paras(
bulk_paras: Vec<ChainNumCollators>,
pool_paras: Vec<ChainNumCollators>,
target_session_index: T::SessionIndex,
number_of_collators: u32,
collators_per_container: u32,
collators_per_parathread: u32,
) -> (Vec<ChainNumCollators>, bool) {
// Are there enough collators to satisfy the minimum demand?
let enough_collators_for_all_chain = Self::enough_collators_for_all_chains(
&bulk_paras,
&pool_paras,
target_session_index,
number_of_collators,
collators_per_container,
collators_per_parathread,
);

let mut chains: Vec<_> = bulk_paras.into_iter().chain(pool_paras).collect();

// Prioritize paras by tip on congestion
// As of now this doesn't distinguish between bulk paras and pool paras
if !enough_collators_for_all_chain {
chains.sort_by(|a, b| {
T::CollatorAssignmentTip::get_para_tip(b.para_id)
.cmp(&T::CollatorAssignmentTip::get_para_tip(a.para_id))
});
}

(chains, !enough_collators_for_all_chain)
}

/// Assign new collators
/// collators should be queued collators
pub fn assign_collators(
current_session_index: &T::SessionIndex,
random_seed: [u8; 32],
collators: Vec<T::AccountId>,
) -> SessionChangeOutcome<T> {
let maybe_core_allocation_configuration = T::CoreAllocationConfiguration::get();
// We work with one session delay to calculate assignments
let session_delay = T::SessionIndex::one();
let target_session_index = current_session_index.saturating_add(session_delay);

let collators_per_container =
T::HostConfiguration::collators_per_container(target_session_index);
let collators_per_parathread =
T::HostConfiguration::collators_per_parathread(target_session_index);

// We get the containerChains that we will have at the target session
let container_chains =
T::ContainerChains::session_container_chains(target_session_index);
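As a worked illustration of the two checks added above, `enough_collators_for_all_chains` compares the collator count against the orchestrator minimum plus per-parachain and per-parathread demand, while `order_paras_with_core_config` caps the number of bulk paras at `max_parachain_percentage` of the core count. A minimal standalone sketch with made-up values (2 orchestrator collators, 2 collators per parachain, 1 per parathread, 5 parachains, 3 parathreads, 10 cores, a 50% parachain cap), assuming the sp_arithmetic crate for Perbill:

use sp_arithmetic::Perbill;

fn main() {
    // Made-up host-configuration values, for illustration only.
    let min_collators_for_orchestrator = 2u32;
    let collators_per_container = 2u32;
    let collators_per_parathread = 1u32;
    let (bulk_paras, pool_paras) = (5u32, 3u32);

    // Mirrors `enough_collators_for_all_chains`: orchestrator minimum plus
    // per-parachain and per-parathread demand.
    let needed = min_collators_for_orchestrator
        .saturating_add(collators_per_container.saturating_mul(bulk_paras))
        .saturating_add(collators_per_parathread.saturating_mul(pool_paras));
    assert_eq!(needed, 15); // 2 + 2 * 5 + 1 * 3

    // Mirrors the core cap in `order_paras_with_core_config`:
    // 50% of 10 cores allows at most 5 bulk paras to keep a dedicated core.
    let max_number_of_bulk_paras = Perbill::from_percent(50) * 10u32;
    assert_eq!(max_number_of_bulk_paras, 5);
    assert!(bulk_paras <= max_number_of_bulk_paras); // enough cores in this example
}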
@@ -201,7 +323,7 @@ pub mod pallet {
let mut shuffle_collators = None;
// If the random_seed is all zeros, we don't shuffle the list of collators nor the list
// of container chains.
// This should only happen in tests, and in the genesis block.
// This should only happen in tests_without_core_config, and in the genesis block.
if random_seed != [0; 32] {
let mut rng: ChaCha20Rng = SeedableRng::from_seed(random_seed);
container_chain_ids.shuffle(&mut rng);
@@ -234,45 +356,45 @@ pub mod pallet {
// Chains will not be assigned less than `min_collators`, except the orchestrator chain.
// First all chains will be assigned `min_collators`, and then the first one will be assigned up to `max`,
// then the second one, and so on.
let mut chains = vec![];
let collators_per_container =
T::HostConfiguration::collators_per_container(target_session_index);
let mut bulk_paras = vec![];
let mut pool_paras = vec![];

for para_id in &container_chain_ids {
chains.push(ChainNumCollators {
bulk_paras.push(ChainNumCollators {
para_id: *para_id,
min_collators: collators_per_container,
max_collators: collators_per_container,
});
}
let collators_per_parathread =
T::HostConfiguration::collators_per_parathread(target_session_index);
for para_id in &parathreads {
chains.push(ChainNumCollators {
pool_paras.push(ChainNumCollators {
para_id: *para_id,
min_collators: collators_per_parathread,
max_collators: collators_per_parathread,
});
}

// Are there enough collators to satisfy the minimum demand?
let enough_collators_for_all_chain = collators.len() as u32
>= T::HostConfiguration::min_collators_for_orchestrator(target_session_index)
.saturating_add(
collators_per_container.saturating_mul(container_chain_ids.len() as u32),
let (chains, need_to_charge_tip) =
if let Some(core_allocation_configuration) = maybe_core_allocation_configuration {
Self::order_paras_with_core_config(
bulk_paras,
pool_paras,
&core_allocation_configuration,
target_session_index,
collators.len() as u32,
collators_per_container,
collators_per_parathread,
)
.saturating_add(
collators_per_parathread.saturating_mul(parathreads.len() as u32),
);

// Prioritize paras by tip on congestion
// As of now this doesn't distinguish between parachains and parathreads
// TODO apply different logic to parathreads
if !enough_collators_for_all_chain {
chains.sort_by(|a, b| {
T::CollatorAssignmentTip::get_para_tip(b.para_id)
.cmp(&T::CollatorAssignmentTip::get_para_tip(a.para_id))
});
}
} else {
Self::order_paras(
bulk_paras,
pool_paras,
target_session_index,
collators.len() as u32,
collators_per_container,
collators_per_parathread,
)
};

// We assign new collators
// we use the config scheduled at the target_session_index
@@ -334,7 +456,7 @@ pub mod pallet {
assigned_containers.retain(|_, v| !v.is_empty());

// On congestion, prioritized chains need to pay the minimum tip of the prioritized chains
let maybe_tip: Option<BalanceOf<T>> = if enough_collators_for_all_chain {
let maybe_tip: Option<BalanceOf<T>> = if !need_to_charge_tip {
None
} else {
assigned_containers
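On congestion the pallet sorts chains by `get_para_tip` in descending order and charges the minimum tip among the prioritized chains. The descending order relies on Option's total order: None sorts below any Some(tip), so untipped chains always end up last and the highest bidders come first. A tiny standalone sketch of that comparator, with illustrative values only:

fn main() {
    // Same comparator shape as `sort_by(|a, b| b_tip.cmp(&a_tip))` in the pallet.
    let mut tips: Vec<Option<u32>> = vec![None, Some(5), Some(20)];
    tips.sort_by(|a, b| b.cmp(a));
    assert_eq!(tips, vec![Some(20), Some(5), None]);
}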
38 changes: 36 additions & 2 deletions pallets/collator-assignment/src/mock.rs
@@ -17,7 +17,7 @@
use {
crate::{
self as pallet_collator_assignment, pallet::CollatorContainerChain,
GetRandomnessForNextBlock, RotateCollatorsEveryNSessions,
CoreAllocationConfiguration, GetRandomnessForNextBlock, RotateCollatorsEveryNSessions,
},
frame_support::{
parameter_types,
@@ -101,6 +101,10 @@ pub mod mock_data {
#[pallet::storage]
pub(super) type Mock<T: Config> = StorageValue<_, Mocks, ValueQuery>;

#[pallet::storage]
pub(super) type MockCoreAllocationConfiguration<T: Config> =
StorageValue<_, CoreAllocationConfiguration, OptionQuery>;

impl<T: Config> Pallet<T> {
pub fn mock() -> Mocks {
Mock::<T>::get()
@@ -111,6 +115,14 @@
{
Mock::<T>::mutate(f)
}

pub fn core_allocation_config() -> Option<CoreAllocationConfiguration> {
MockCoreAllocationConfiguration::<T>::get()
}

pub fn set_core_allocation_config(config: Option<CoreAllocationConfiguration>) {
MockCoreAllocationConfiguration::<T>::set(config);
}
}
}

@@ -135,6 +147,7 @@ pub struct Mocks {
pub container_chains: Vec<u32>,
pub parathreads: Vec<u32>,
pub random_seed: [u8; 32],
pub chains_that_are_tipping: Vec<ParaId>,
// None means 5
pub full_rotation_period: Option<u32>,
pub apply_tip: bool,
@@ -155,6 +168,7 @@ impl Default for Mocks {
container_chains: Default::default(),
parathreads: Default::default(),
random_seed: Default::default(),
chains_that_are_tipping: vec![1003.into(), 1004.into()],
full_rotation_period: Default::default(),
apply_tip: Default::default(),
assignment_hook_errors: Default::default(),
@@ -197,6 +211,10 @@ impl pallet_collator_assignment::GetHostConfiguration<u32> for HostConfiguration
MockData::mock().target_container_chain_fullness
}

fn max_parachain_cores_percentage(_session_index: u32) -> Option<Perbill> {
None
}

#[cfg(feature = "runtime-benchmarks")]
fn set_host_configuration(_session_index: u32) {
MockData::mutate(|mocks| {
@@ -284,7 +302,8 @@ pub struct MockCollatorAssignmentTip

impl CollatorAssignmentTip<u32> for MockCollatorAssignmentTip {
fn get_para_tip(para_id: ParaId) -> Option<u32> {
if MockData::mock().apply_tip && (para_id == 1003u32.into() || para_id == 1004u32.into()) {
if MockData::mock().apply_tip && MockData::mock().chains_that_are_tipping.contains(&para_id)
{
Some(1_000u32)
} else {
None
@@ -309,6 +328,20 @@ impl CollatorAssignmentHook<u32> for MockCollatorAssignmentHook {
}
}

pub struct GetCoreAllocationConfigurationImpl;

impl GetCoreAllocationConfigurationImpl {
pub fn set(config: Option<CoreAllocationConfiguration>) {
MockData::set_core_allocation_config(config);
}
}

impl Get<Option<CoreAllocationConfiguration>> for GetCoreAllocationConfigurationImpl {
fn get() -> Option<CoreAllocationConfiguration> {
MockData::core_allocation_config()
}
}

impl pallet_collator_assignment::Config for Test {
type RuntimeEvent = RuntimeEvent;
type SessionIndex = u32;
@@ -324,6 +357,7 @@ impl pallet_collator_assignment::Config for Test {
type CollatorAssignmentTip = MockCollatorAssignmentTip;
type ForceEmptyOrchestrator = ConstBool<false>;
type Currency = ();
type CoreAllocationConfiguration = GetCoreAllocationConfigurationImpl;
type WeightInfo = ();
}

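With `GetCoreAllocationConfigurationImpl` wired in as the pallet's `CoreAllocationConfiguration`, tests can toggle the core restriction through the mock storage. A hedged sketch of how the new `with_core_config` tests might drive it (illustrative values, meant to run inside the mock runtime's test externalities):

// Cap parachains at 50% of 10 cores so assignment takes the
// `order_paras_with_core_config` path.
GetCoreAllocationConfigurationImpl::set(Some(CoreAllocationConfiguration {
    core_count: 10,
    max_parachain_percentage: Perbill::from_percent(50),
}));

// Clearing the value falls back to the plain `order_paras` path.
GetCoreAllocationConfigurationImpl::set(None);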
1 change: 1 addition & 0 deletions pallets/collator-assignment/src/tests.rs
@@ -23,6 +23,7 @@ use {
mod assign_full;
mod prioritize_invulnerables;
mod select_chains;
mod with_core_config;

#[test]
fn assign_initial_collators() {