* add configuration state
* start updating to 23w31a
* implement a bit more of 23w31a
* chunk batching
* start adding configuration state
* ioasfhjgsd
* almost works
* configuration state mostly implemented
* handle other packets in configuration state and fix keepalive
* cleanup, fix warnings
* 23w32a
* fix some doctests
* 23w33a
* 23w35a
* 1.20.2-pre2
* fix system conflicts
* 1.20.2-pre4
* make tests compile
* tests pass
* 1.20.2-rc2
* 1.20.2
* Revert "1.20.2"

  This reverts commit dd152fd.

* didn't mean to commit that code

---------

Co-authored-by: mat <git@matdoes.dev>
Showing 85 changed files with 3,100 additions and 1,873 deletions.
@@ -0,0 +1,146 @@
//! Used for Minecraft's chunk batching introduced in 23w31a (1.20.2). It lets
//! the server spread out how often it sends us chunk packets depending on our
//! receiving speed.

use std::time::{Duration, Instant};

use azalea_protocol::packets::game::serverbound_chunk_batch_received_packet::ServerboundChunkBatchReceivedPacket;
use bevy_app::{App, Plugin, Update};
use bevy_ecs::prelude::*;

use crate::{
    interact::handle_block_interact_event,
    inventory::InventorySet,
    local_player::{handle_send_packet_event, SendPacketEvent},
    respawn::perform_respawn,
};

/// Tracks how quickly we receive chunk batches and replies to the server with
/// the rate we'd like chunks to be sent at.
pub struct ChunkBatchingPlugin;
impl Plugin for ChunkBatchingPlugin {
    fn build(&self, app: &mut App) {
        app.add_systems(
            Update,
            (
                handle_chunk_batch_start_event,
                handle_chunk_batch_finished_event,
            )
                .chain()
                .before(handle_send_packet_event)
                .before(InventorySet)
                .before(handle_block_interact_event)
                .before(perform_respawn),
        )
        .add_event::<ChunkBatchStartEvent>()
        .add_event::<ChunkBatchFinishedEvent>();
    }
}

/// Per-client state: when the current batch started and our measured chunk
/// receive speed so far.
#[derive(Component, Clone, Debug)]
pub struct ChunkBatchInfo {
    pub start_time: Instant,
    pub accumulator: ChunkReceiveSpeedAccumulator,
}

/// Fired when the server tells us it's starting a batch of chunk packets.
#[derive(Event)]
pub struct ChunkBatchStartEvent {
    pub entity: Entity,
}
/// Fired when the server tells us a batch ended, along with how many chunks
/// were in it.
#[derive(Event)]
pub struct ChunkBatchFinishedEvent {
    pub entity: Entity,
    pub batch_size: u32,
}

pub fn handle_chunk_batch_start_event(
    mut query: Query<&mut ChunkBatchInfo>,
    mut events: EventReader<ChunkBatchStartEvent>,
) {
    for event in events.iter() {
        if let Ok(mut chunk_batch_info) = query.get_mut(event.entity) {
            chunk_batch_info.start_time = Instant::now();
        }
    }
}

pub fn handle_chunk_batch_finished_event(
    mut query: Query<&mut ChunkBatchInfo>,
    mut events: EventReader<ChunkBatchFinishedEvent>,
    mut send_packets: EventWriter<SendPacketEvent>,
) {
    for event in events.iter() {
        if let Ok(mut chunk_batch_info) = query.get_mut(event.entity) {
            let batch_duration = chunk_batch_info.start_time.elapsed();
            if event.batch_size > 0 {
                chunk_batch_info
                    .accumulator
                    .accumulate(event.batch_size, batch_duration);
            }
            let millis_per_chunk =
                f64::max(0., chunk_batch_info.accumulator.get_millis_per_chunk());
            let desired_chunks_per_tick = if millis_per_chunk == 0. {
                // make it the server's problem instead
                f32::NAN
            } else {
                // a tick is 50ms, so this asks for a rate that spends about
                // half a tick (25ms) receiving chunks
                (25. / millis_per_chunk) as f32
            };
            send_packets.send(SendPacketEvent {
                entity: event.entity,
                packet: ServerboundChunkBatchReceivedPacket {
                    desired_chunks_per_tick,
                }
                .get(),
            });
        }
    }
}

/// A fixed-capacity ring buffer of recent batch sizes and durations, used to
/// estimate the average cost of receiving a chunk.
#[derive(Clone, Debug)]
pub struct ChunkReceiveSpeedAccumulator {
    batch_sizes: Vec<u32>,
    /// as milliseconds
    batch_durations: Vec<u32>,
    index: usize,
    filled_size: usize,
}
impl ChunkReceiveSpeedAccumulator {
    pub fn new(capacity: usize) -> Self {
        Self {
            batch_sizes: vec![0; capacity],
            batch_durations: vec![0; capacity],
            index: 0,
            filled_size: 0,
        }
    }

    pub fn accumulate(&mut self, batch_size: u32, batch_duration: Duration) {
        self.batch_sizes[self.index] = batch_size;
        // clamp to at most 15 seconds per batch
        self.batch_durations[self.index] =
            f32::clamp(batch_duration.as_millis() as f32, 0., 15000.) as u32;
        // advance the write index, wrapping around to overwrite the oldest
        // entry once the buffer is full
        self.index = (self.index + 1) % self.batch_sizes.len();
        if self.filled_size < self.batch_sizes.len() {
            self.filled_size += 1;
        }
    }

    /// The average number of milliseconds it took to receive one chunk, or 0
    /// if nothing has been recorded yet.
    pub fn get_millis_per_chunk(&self) -> f64 {
        let mut total_batch_size = 0;
        let mut total_batch_duration = 0;
        for i in 0..self.filled_size {
            total_batch_size += self.batch_sizes[i];
            total_batch_duration += self.batch_durations[i];
        }
        if total_batch_size == 0 {
            return 0.;
        }
        total_batch_duration as f64 / total_batch_size as f64
    }
}

impl Default for ChunkBatchInfo {
    fn default() -> Self {
        Self {
            start_time: Instant::now(),
            accumulator: ChunkReceiveSpeedAccumulator::new(50),
        }
    }
}
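The interesting part is the feedback computation in handle_chunk_batch_finished_event: the client measures how long each batch took to arrive, averages that into a per-chunk cost, then asks for however many chunks fit into 25ms (half of a 50ms tick). With no data yet, millis_per_chunk is 0 and the client sends f32::NAN, leaving the pacing decision to the server. Below is a minimal standalone sketch of that arithmetic with made-up numbers (not part of the commit):

fn main() {
    // hypothetical sample: the last batch had 32 chunks and took 160ms
    let total_chunks: u32 = 32;
    let total_duration_ms: u32 = 160;

    // 160ms / 32 chunks = 5ms per chunk
    let millis_per_chunk = total_duration_ms as f64 / total_chunks as f64;

    // 25ms is half of a 50ms tick, so request 25 / 5 = 5 chunks per tick
    let desired_chunks_per_tick = (25. / millis_per_chunk) as f32;

    assert_eq!(desired_chunks_per_tick, 5.0);
    println!("requesting {desired_chunks_per_tick} chunks per tick");
}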
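ChunkReceiveSpeedAccumulator itself is a plain ring buffer and can be exercised in isolation. A usage sketch, assuming the module is reachable as azalea_client::chunk_batching (the commit doesn't show where the file is mounted in the crate):

use std::time::Duration;

// assumed import path, not shown in the commit
use azalea_client::chunk_batching::ChunkReceiveSpeedAccumulator;

fn main() {
    let mut acc = ChunkReceiveSpeedAccumulator::new(50);

    acc.accumulate(10, Duration::from_millis(50)); // 5 ms/chunk
    acc.accumulate(20, Duration::from_millis(100)); // 5 ms/chunk
    acc.accumulate(10, Duration::from_millis(200)); // 20 ms/chunk

    // (50 + 100 + 200) / (10 + 20 + 10) = 350 / 40 = 8.75 ms per chunk
    assert_eq!(acc.get_millis_per_chunk(), 8.75);
}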