diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index e112b0dc..456de85b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -81,7 +81,7 @@ jobs: - name: Test shell: bash run: | - poetry run coverage run -m pytest --durations=10 -s --traceconfig --log-cli-level=DEBUG tests/ + poetry run coverage run -m pytest --durations=10 poetry run coverage report -m # - name: Archive code coverage results diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 690f6c85..6b37ffe4 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -32,19 +32,6 @@ jobs: - name: Install awpy run: | poetry install --no-interaction - - # - name: Publish to test PyPI - # run: | - # poetry publish --build --repository testpypi --username __token__ --password ${{ secrets.TEST_PYPI_API_TOKEN }} -vvv - - - name: Debug Step - run: | - echo "Listing directory contents" - ls -la - echo "Checking Poetry version" - poetry --version - echo "Checking installed packages" - poetry show - name: Publish to PyPI run: | diff --git a/awpy/demo.py b/awpy/demo.py index 7873aad3..effb500c 100644 --- a/awpy/demo.py +++ b/awpy/demo.py @@ -10,17 +10,18 @@ from demoparser2 import DemoParser # pylint: disable=E0611 from loguru import logger -from awpy.parsers import ( +from awpy.parsers.clock import parse_times +from awpy.parsers.events import ( parse_bomb, parse_damages, parse_grenades, parse_infernos, parse_kills, - parse_rounds, parse_smokes, - parse_ticks, parse_weapon_fires, ) +from awpy.parsers.rounds import parse_rounds +from awpy.parsers.ticks import parse_ticks from awpy.utils import apply_round_num PROP_WARNING_LIMIT = 40 @@ -193,21 +194,40 @@ def _parse_events(self) -> None: raise ValueError(no_events_error_msg) if self.parse_rounds is True: - self.rounds = parse_rounds(self.parser) + self.rounds = parse_rounds( + self.parser, self.events + ) # Must pass parser for round start/end events - self.kills = apply_round_num(self.rounds, parse_kills(self.events)) - self.damages = apply_round_num(self.rounds, parse_damages(self.events)) - self.bomb = apply_round_num(self.rounds, parse_bomb(self.events)) - self.smokes = apply_round_num( - self.rounds, parse_smokes(self.events), tick_col="start_tick" + self.kills = parse_times( + apply_round_num(self.rounds, parse_kills(self.events)), self.rounds ) - self.infernos = apply_round_num( - self.rounds, parse_infernos(self.events), tick_col="start_tick" + self.damages = parse_times( + apply_round_num(self.rounds, parse_damages(self.events)), self.rounds ) - self.weapon_fires = apply_round_num( - self.rounds, parse_weapon_fires(self.events) + self.bomb = parse_times( + apply_round_num(self.rounds, parse_bomb(self.events)), self.rounds + ) + self.smokes = parse_times( + apply_round_num( + self.rounds, parse_smokes(self.events), tick_col="start_tick" + ), + self.rounds, + tick_col="start_tick", + ) + self.infernos = parse_times( + apply_round_num( + self.rounds, parse_infernos(self.events), tick_col="start_tick" + ), + self.rounds, + tick_col="start_tick", + ) + self.weapon_fires = parse_times( + apply_round_num(self.rounds, parse_weapon_fires(self.events)), + self.rounds, + ) + self.grenades = parse_times( + apply_round_num(self.rounds, parse_grenades(self.parser)), self.rounds ) - self.grenades = apply_round_num(self.rounds, parse_grenades(self.parser)) # Parse ticks if self.parse_ticks is True: diff --git a/awpy/parsers/__init__.py b/awpy/parsers/__init__.py new file mode 100644 index 00000000..61b3aea9 
--- /dev/null +++ b/awpy/parsers/__init__.py @@ -0,0 +1 @@ +"""Module for specific parsing functions.""" diff --git a/awpy/parsers/clock.py b/awpy/parsers/clock.py new file mode 100644 index 00000000..a38e4d38 --- /dev/null +++ b/awpy/parsers/clock.py @@ -0,0 +1,120 @@ +"""Module for time and clock parsing functions.""" + +import math +from typing import Literal, Union + +import pandas as pd + +ROUND_START_DEFAULT_TIME_IN_SECS = 20 +FREEZE_DEFAULT_TIME_IN_SECS = 115 +BOMB_DEFAULT_TIME_IN_SECS = 40 + + +def parse_clock( + seconds_since_phase_change: int, + max_time_ticks: Union[Literal["start", "freeze", "bomb"], int], + tick_rate: int = 64, +) -> str: + """Parse the remaining time in a round or phase to a clock string. + + Args: + seconds_since_phase_change (int): The number of ticks since the phase change (callers pass ticks, not seconds). + max_time_ticks (Union[Literal['start', 'freeze', 'bomb'], int]): The maximum + time in ticks for the phase. + tick_rate (int, optional): The tick rate of the server. Defaults to 64. + + Returns: + str: The remaining time in MM:SS format. + """ + if max_time_ticks == "start": + max_time_ticks = ROUND_START_DEFAULT_TIME_IN_SECS * tick_rate + elif max_time_ticks == "freeze": + max_time_ticks = FREEZE_DEFAULT_TIME_IN_SECS * tick_rate + elif max_time_ticks == "bomb": + max_time_ticks = BOMB_DEFAULT_TIME_IN_SECS * tick_rate + + # Calculate the remaining time in ticks + remaining_ticks = max_time_ticks - seconds_since_phase_change + + # Convert remaining ticks to total seconds + remaining_seconds = remaining_ticks / tick_rate + + # Round up the seconds + remaining_seconds = math.ceil(remaining_seconds) + + # Calculate minutes and seconds + minutes = remaining_seconds // 60 + seconds = remaining_seconds % 60 + + # Format as MM:SS with leading zeros + return f"{int(minutes):02}:{int(seconds):02}" + + +def _find_clock_time(row: pd.Series) -> str: + """Find the clock time for a row. + + Args: + row: A row from a dataframe with ticks_since_* columns. + """ + times = { + "start": row["ticks_since_round_start"], + "freeze": row["ticks_since_freeze_time_end"], + "bomb": row["ticks_since_bomb_plant"], + } + # Filter out NA values and find the key with the minimum value + min_key = min((k for k in times if pd.notna(times[k])), key=lambda k: times[k]) + return parse_clock(times[min_key], min_key) + + +def parse_times( + df: pd.DataFrame, rounds_df: pd.DataFrame, tick_col: str = "tick" +) -> pd.DataFrame: + """Adds ticks_since_* and clock columns to the dataframe. + + Args: + df (pd.DataFrame): The dataframe to add the time columns to. + rounds_df (pd.DataFrame): The rounds dataframe. + tick_col (str): The column name of the tick column. + + Returns: + pd.DataFrame: The dataframe with the ticks_since_* and clock columns added. + """ + if tick_col not in df.columns: + tick_col_missing_msg = f"{tick_col} not found in dataframe."
+ raise ValueError(tick_col_missing_msg) + + df_with_round_info = df.merge(rounds_df, on="round", how="left") + df_with_round_info["ticks_since_round_start"] = ( + df_with_round_info[tick_col] - df_with_round_info["start"] + ) + df_with_round_info["ticks_since_freeze_time_end"] = ( + df_with_round_info[tick_col] - df_with_round_info["freeze_end"] + ) + df_with_round_info["ticks_since_bomb_plant"] = ( + df_with_round_info[tick_col] - df_with_round_info["bomb_plant"] + ) + + # Apply the function to the selected columns + for col in df_with_round_info.columns: + if col.startswith("ticks_since_"): + df_with_round_info[col] = ( + df_with_round_info[col] + .map(lambda x: pd.NA if x < 0 else x) + .astype(pd.Int64Dtype()) + ) + + df_with_round_info = df_with_round_info.drop( + columns=[ + "start", + "freeze_end", + "end", + "official_end", + "winner", + "reason", + "bomb_plant", + ] + ) + + df_with_round_info["clock"] = df_with_round_info.apply(_find_clock_time, axis=1) + + return df_with_round_info diff --git a/awpy/parsers.py b/awpy/parsers/events.py similarity index 69% rename from awpy/parsers.py rename to awpy/parsers/events.py index 97a24a7d..0b3e82bd 100644 --- a/awpy/parsers.py +++ b/awpy/parsers/events.py @@ -1,4 +1,4 @@ -"""Contains parsers for the different pieces of data.""" +"""Module for event parsing functions.""" import numpy as np import pandas as pd @@ -8,80 +8,8 @@ from awpy.converters import ( map_hitgroup, ) - - -def parse_col_types(df: pd.DataFrame) -> pd.DataFrame: - """Parse the column types of a dataframe. - - Args: - df: A pandas DataFrame. - - Returns: - A DataFrame with the column types. - """ - for col in df.columns: - # SteamIDs should be ints - if "steamid" in col: - df[col] = df[col].astype(str) - return df - - -def remove_nonplay_ticks(parsed_df: pd.DataFrame) -> pd.DataFrame: - """Filter out non-play records from a dataframe. - - Args: - parsed_df (pd.DataFrame): A dataframe with the columns... - - Returns: - pd.DataFrame: A dataframe with the non-play records removed. - """ - # Check if the required columns are in the dataframe - for col in [ - "is_freeze_period", - "is_warmup_period", - "is_terrorist_timeout", - "is_ct_timeout", - "is_technical_timeout", - "is_waiting_for_resume", - "is_match_started", - "game_phase", - ]: - if col not in parsed_df.columns: - error_msg = f"{col} not found in dataframe." - raise ValueError(error_msg) - - # Remove records which do not occur in-play - parsed_df = parsed_df[ - (~parsed_df["is_freeze_period"]) - & (~parsed_df["is_warmup_period"]) - & (~parsed_df["is_terrorist_timeout"]) - & (~parsed_df["is_ct_timeout"]) - & (~parsed_df["is_technical_timeout"]) - & (~parsed_df["is_waiting_for_resume"]) - & (parsed_df["is_match_started"]) - & ( - parsed_df["game_phase"].isin( - [ - 2, # startgame - 3, # preround - ] - ) - ) - ] - - # Drop the state columns - return parsed_df.drop( - columns=[ - "is_freeze_period", - "is_warmup_period", - "is_terrorist_timeout", - "is_ct_timeout", - "is_technical_timeout", - "is_waiting_for_resume", - "is_match_started", - "game_phase", - ] - ) +from awpy.parsers.ticks import remove_nonplay_ticks +from awpy.parsers.utils import parse_col_types def parse_grenades(parser: DemoParser) -> pd.DataFrame: @@ -109,120 +37,6 @@ def parse_grenades(parser: DemoParser) -> pd.DataFrame: ] -def parse_rounds(parser: DemoParser) -> pd.DataFrame: - """Parse the rounds of the demofile. - - Args: - parser: The parser object. - - Returns: - The rounds for the demofile. 
- - Raises: - KeyError: If a round-related event is not found in the events. - """ - round_start = parser.parse_event("round_start") - if len(round_start) == 0: - round_start_missing_msg = "round_start not found in events." - raise KeyError(round_start_missing_msg) - round_start["event"] = "start" - - round_end = parser.parse_event("round_end") - if len(round_end) == 0: - round_end_missing_msg = "round_end not found in events." - raise KeyError(round_end_missing_msg) - round_end = round_end[~round_end["winner"].isna()] # Remove None round ends - round_end["event"] = "end" - - round_end_official = parser.parse_event("round_officially_ended") - if len(round_end_official) == 0: - round_end_official_missing_msg = "round_officially_ended not found in events." - raise KeyError(round_end_official_missing_msg) - round_end_official["event"] = "official_end" - - round_freeze_end = parser.parse_event("round_freeze_end") - if len(round_freeze_end) == 0: - round_freeze_end_missing_msg = "round_freeze_end not found in events." - raise KeyError(round_freeze_end_missing_msg) - round_freeze_end["event"] = "freeze_end" - - rounds = pd.concat( - [ - round_start[["event", "tick"]], - round_freeze_end[["event", "tick"]], - round_end[["event", "tick"]], - round_end_official[["event", "tick"]], - ] - ) - - # Remove everything that happen on tick 0, except starts - rounds = rounds[~((rounds["tick"] == 0) & (rounds["event"] != "start"))] - - # Then, order - event_order = ["official_end", "start", "freeze_end", "end"] - rounds["event"] = pd.Categorical( - rounds["event"], categories=event_order, ordered=True - ) - rounds = ( - rounds.sort_values(by=["tick", "event"]) - .drop_duplicates() - .reset_index(drop=True) - ) - - # Initialize an empty list to store the indices of rows to keep - indices_to_keep = [] - - # Loop through the DataFrame and check for the correct order of events - full_sequence_offset = len(event_order) - for i in range(len(rounds)): - # Extract the current sequence of events - current_sequence = rounds["event"].iloc[i : i + full_sequence_offset].tolist() - # Check if the current sequence matches the correct order - if current_sequence == ["start", "freeze_end", "end", "official_end"]: - # If it does, add the indices of these rows to the list - indices_to_keep.extend(range(i, i + full_sequence_offset)) - # Case for end of match where we might not get a round official end - # Case for start of match where we might not get a freeze end - elif current_sequence == ["start", "freeze_end", "end"] or current_sequence[ - 0 : full_sequence_offset - 1 - ] == [ - "start", - "end", - "official_end", - ]: - indices_to_keep.extend(range(i, i + full_sequence_offset - 1)) - - # Filter the DataFrame to keep only the rows with the correct sequence - rounds_filtered = rounds.loc[indices_to_keep].reset_index(drop=True) - rounds_filtered["round"] = (rounds_filtered["event"] == "start").cumsum() - rounds_reshaped = rounds_filtered.pivot_table( - index="round", columns="event", values="tick", aggfunc="first", observed=False - ).reset_index(drop=True) - rounds_reshaped = rounds_reshaped[ - ["start", "freeze_end", "end", "official_end"] - ].astype("Int32") - rounds_reshaped.columns = ["start", "freeze_end", "end", "official_end"] - rounds_reshaped = rounds_reshaped.merge( - round_end[ - [ - "tick", - "winner", - "reason", - ] - ], - left_on="end", - right_on="tick", - how="left", - ) - rounds_reshaped["round"] = rounds_reshaped.index + 1 - rounds_reshaped["official_end"] = rounds_reshaped["official_end"].fillna( - 
rounds_reshaped["end"] - ) - return rounds_reshaped[ - ["round", "start", "freeze_end", "end", "official_end", "winner", "reason"] - ] - - def parse_kills(events: dict[str, pd.DataFrame]) -> pd.DataFrame: """Parse the kills of the demofile. @@ -701,20 +515,3 @@ def parse_weapon_fires(events: dict[str, pd.DataFrame]) -> pd.DataFrame: columns={col: col.replace("user_", "player_")} ) return weapon_fires_df - - -def parse_ticks( - parser: DemoParser, player_props: list[str], other_props: list[str] -) -> pd.DataFrame: - """Parse the ticks of the demofile. - - Args: - parser (DemoParser): The parser object. - player_props (list[str]): Player properties to parse. - other_props (list[str]): World properties to parse. - - Returns: - pd.DataFrame: The ticks for the demofile. - """ - ticks = parser.parse_ticks(wanted_props=player_props + other_props) - return parse_col_types(remove_nonplay_ticks(ticks)) diff --git a/awpy/parsers/rounds.py b/awpy/parsers/rounds.py new file mode 100644 index 00000000..651cbb95 --- /dev/null +++ b/awpy/parsers/rounds.py @@ -0,0 +1,153 @@ +"""Module for round parsing functions.""" + +from typing import Union + +import numpy as np +import pandas as pd +from demoparser2 import DemoParser # pylint: disable=E0611 + + +def _find_bomb_plant_tick(row: pd.Series, bomb_ticks: pd.Series) -> Union[int, float]: + """Find the bomb plant tick for a round. + + Args: + row: A row from a dataframe + bomb_ticks: A series of bomb ticks + + Returns: + The bomb plant tick for the round, or NaN if no bomb plant was found. + """ + # Filter the bomb ticks that fall within the round's start and end + plant_ticks = bomb_ticks[(bomb_ticks >= row["start"]) & (bomb_ticks <= row["end"])] + # Return the first bomb plant tick if it exists, otherwise NaN + return plant_ticks.iloc[0] if not plant_ticks.empty else np.nan + + +def parse_rounds(parser: DemoParser, events: dict[str, pd.DataFrame]) -> pd.DataFrame: + """Parse the rounds of the demofile. + + Args: + parser: The parser object. + events: A dictionary of parsed events. + + Returns: + The rounds for the demofile. + + Raises: + KeyError: If a round-related event is not found in the events. + """ + round_start = parser.parse_event("round_start") + if len(round_start) == 0: + round_start_missing_msg = "round_start not found in events." + raise KeyError(round_start_missing_msg) + round_start["event"] = "start" + + round_end = parser.parse_event("round_end") + if len(round_end) == 0: + round_end_missing_msg = "round_end not found in events." + raise KeyError(round_end_missing_msg) + round_end = round_end[~round_end["winner"].isna()] # Remove None round ends + round_end["event"] = "end" + + round_end_official = parser.parse_event("round_officially_ended") + if len(round_end_official) == 0: + round_end_official_missing_msg = "round_officially_ended not found in events." + raise KeyError(round_end_official_missing_msg) + round_end_official["event"] = "official_end" + + round_freeze_end = parser.parse_event("round_freeze_end") + if len(round_freeze_end) == 0: + round_freeze_end_missing_msg = "round_freeze_end not found in events." 
+ raise KeyError(round_freeze_end_missing_msg) + round_freeze_end["event"] = "freeze_end" + + rounds = pd.concat( + [ + round_start[["event", "tick"]], + round_freeze_end[["event", "tick"]], + round_end[["event", "tick"]], + round_end_official[["event", "tick"]], + ] + ) + + # Remove everything that happens on tick 0, except starts + rounds = rounds[~((rounds["tick"] == 0) & (rounds["event"] != "start"))] + + # Then, order + event_order = ["official_end", "start", "freeze_end", "end"] + rounds["event"] = pd.Categorical( + rounds["event"], categories=event_order, ordered=True + ) + rounds = ( + rounds.sort_values(by=["tick", "event"]) + .drop_duplicates() + .reset_index(drop=True) + ) + + # Initialize an empty list to store the indices of rows to keep + indices_to_keep = [] + + # Loop through the DataFrame and check for the correct order of events + full_sequence_offset = len(event_order) + for i in range(len(rounds)): + # Extract the current sequence of events + current_sequence = rounds["event"].iloc[i : i + full_sequence_offset].tolist() + # Check if the current sequence matches the correct order + if current_sequence == ["start", "freeze_end", "end", "official_end"]: + # If it does, add the indices of these rows to the list + indices_to_keep.extend(range(i, i + full_sequence_offset)) + # Case for end of match where we might not get a round official end + # Case for start of match where we might not get a freeze end + elif current_sequence == ["start", "freeze_end", "end"] or current_sequence[ + 0 : full_sequence_offset - 1 + ] == [ + "start", + "end", + "official_end", + ]: + indices_to_keep.extend(range(i, i + full_sequence_offset - 1)) + + # Filter the DataFrame to keep only the rows with the correct sequence + rounds_filtered = rounds.loc[indices_to_keep].reset_index(drop=True) + rounds_filtered["round"] = (rounds_filtered["event"] == "start").cumsum() + rounds_reshaped = rounds_filtered.pivot_table( + index="round", columns="event", values="tick", aggfunc="first", observed=False + ).reset_index(drop=True) + rounds_reshaped = rounds_reshaped[ + ["start", "freeze_end", "end", "official_end"] + ].astype("Int32") + rounds_reshaped.columns = ["start", "freeze_end", "end", "official_end"] + rounds_reshaped = rounds_reshaped.merge( + round_end[ + [ + "tick", + "winner", + "reason", + ] + ], + left_on="end", + right_on="tick", + how="left", + ) + rounds_reshaped["round"] = rounds_reshaped.index + 1 + rounds_reshaped["official_end"] = rounds_reshaped["official_end"].fillna( + rounds_reshaped["end"] + ) + + # Subset round columns + rounds_df = rounds_reshaped[ + ["round", "start", "freeze_end", "end", "official_end", "winner", "reason"] + ] + rounds_df["bomb_plant"] = pd.NA + rounds_df["bomb_plant"] = rounds_df["bomb_plant"].astype(pd.Int64Dtype()) + + # Find the bomb plant ticks + bomb_planted = events.get("bomb_planted") + if bomb_planted is None or bomb_planted.shape[0] == 0: + return rounds_df + + rounds_df["bomb_plant"] = rounds_df.apply( + _find_bomb_plant_tick, bomb_ticks=bomb_planted["tick"], axis=1 + ).astype(pd.Int64Dtype()) + + return rounds_df diff --git a/awpy/parsers/ticks.py b/awpy/parsers/ticks.py new file mode 100644 index 00000000..ad7e4352 --- /dev/null +++ b/awpy/parsers/ticks.py @@ -0,0 +1,83 @@ +"""Module for tick parsing functions.""" + +import pandas as pd +from demoparser2 import DemoParser # pylint: disable=E0611 + +from awpy.parsers.utils import parse_col_types + + +def remove_nonplay_ticks(parsed_df: pd.DataFrame) -> pd.DataFrame: + """Filter out non-play records from a dataframe.
+ + Args: + parsed_df (pd.DataFrame): A dataframe with the in-play state columns (e.g., is_freeze_period, is_warmup_period, is_match_started, game_phase). + + Returns: + pd.DataFrame: A dataframe with the non-play records removed. + """ + # Check if the required columns are in the dataframe + for col in [ + "is_freeze_period", + "is_warmup_period", + "is_terrorist_timeout", + "is_ct_timeout", + "is_technical_timeout", + "is_waiting_for_resume", + "is_match_started", + "game_phase", + ]: + if col not in parsed_df.columns: + error_msg = f"{col} not found in dataframe." + raise ValueError(error_msg) + + # Remove records which do not occur in-play + parsed_df = parsed_df[ + (~parsed_df["is_freeze_period"]) + & (~parsed_df["is_warmup_period"]) + & (~parsed_df["is_terrorist_timeout"]) + & (~parsed_df["is_ct_timeout"]) + & (~parsed_df["is_technical_timeout"]) + & (~parsed_df["is_waiting_for_resume"]) + & (parsed_df["is_match_started"]) + & ( + parsed_df["game_phase"].isin( + [ + 2, # startgame + 3, # preround + ] + ) + ) + ] + + # Drop the state columns + return parsed_df.drop( + columns=[ + "is_freeze_period", + "is_warmup_period", + "is_terrorist_timeout", + "is_ct_timeout", + "is_technical_timeout", + "is_waiting_for_resume", + "is_match_started", + "game_phase", + ] + ) + + +def parse_ticks( + parser: DemoParser, + player_props: list[str], + other_props: list[str], +) -> pd.DataFrame: + """Parse the ticks of the demofile. + + Args: + parser (DemoParser): The parser object. + player_props (list[str]): Player properties to parse. + other_props (list[str]): World properties to parse. + + Returns: + pd.DataFrame: The ticks for the demofile. + """ + ticks_df = parser.parse_ticks(wanted_props=player_props + other_props) + return parse_col_types(remove_nonplay_ticks(ticks_df)) diff --git a/awpy/parsers/utils.py b/awpy/parsers/utils.py new file mode 100644 index 00000000..e685ba12 --- /dev/null +++ b/awpy/parsers/utils.py @@ -0,0 +1,19 @@ +"""Module for parsing utils.""" + +import pandas as pd + + +def parse_col_types(df: pd.DataFrame) -> pd.DataFrame: + """Parse the column types of a dataframe. + + Args: + df: A pandas DataFrame. + + Returns: + A DataFrame with the column types adjusted (SteamID columns cast to strings). + """ + for col in df.columns: + # SteamIDs should be strings + if "steamid" in col: + df[col] = df[col].astype(str) + return df diff --git a/docs/conf.py b/docs/conf.py index 8fbd3c4b..0c99fe1a 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -29,7 +29,7 @@ # The short X.Y version version = "" # The full version, including alpha/beta/rc tags -release = "2.0.0a9" +release = "2.0.0b1" # -- General configuration --------------------------------------------------- diff --git a/docs/data.rst b/docs/data.rst index c06b5d3e..d0e71358 100644 --- a/docs/data.rst +++ b/docs/data.rst @@ -7,9 +7,7 @@ This module contains both data, primarily related to map (e.g., images, navigati from awpy.data.map_data import MAP_DATA -`MAP_DATA` is a dictionary where the top-level keys are map names (strings) and the next-level keys are scaling properties for the map. - -By running `NAV["de_dust2"][1213]`, we would see +`MAP_DATA` is a dictionary where the top-level keys are map names (strings) and the next-level keys are scaling properties for the map. Below, we show an example for one map. ..
code-block:: json diff --git a/poetry.lock b/poetry.lock index c33ed6ec..100884a2 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1476,56 +1476,47 @@ files = [ [[package]] name = "numpy" -version = "2.0.0" +version = "1.26.4" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" files = [ - {file = "numpy-2.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:04494f6ec467ccb5369d1808570ae55f6ed9b5809d7f035059000a37b8d7e86f"}, - {file = "numpy-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2635dbd200c2d6faf2ef9a0d04f0ecc6b13b3cad54f7c67c61155138835515d2"}, - {file = "numpy-2.0.0-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:0a43f0974d501842866cc83471bdb0116ba0dffdbaac33ec05e6afed5b615238"}, - {file = "numpy-2.0.0-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:8d83bb187fb647643bd56e1ae43f273c7f4dbcdf94550d7938cfc32566756514"}, - {file = "numpy-2.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79e843d186c8fb1b102bef3e2bc35ef81160ffef3194646a7fdd6a73c6b97196"}, - {file = "numpy-2.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d7696c615765091cc5093f76fd1fa069870304beaccfd58b5dcc69e55ef49c1"}, - {file = "numpy-2.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b4c76e3d4c56f145d41b7b6751255feefae92edbc9a61e1758a98204200f30fc"}, - {file = "numpy-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:acd3a644e4807e73b4e1867b769fbf1ce8c5d80e7caaef0d90dcdc640dfc9787"}, - {file = "numpy-2.0.0-cp310-cp310-win32.whl", hash = "sha256:cee6cc0584f71adefe2c908856ccc98702baf95ff80092e4ca46061538a2ba98"}, - {file = "numpy-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:ed08d2703b5972ec736451b818c2eb9da80d66c3e84aed1deeb0c345fefe461b"}, - {file = "numpy-2.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad0c86f3455fbd0de6c31a3056eb822fc939f81b1618f10ff3406971893b62a5"}, - {file = "numpy-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e7f387600d424f91576af20518334df3d97bc76a300a755f9a8d6e4f5cadd289"}, - {file = "numpy-2.0.0-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:34f003cb88b1ba38cb9a9a4a3161c1604973d7f9d5552c38bc2f04f829536609"}, - {file = "numpy-2.0.0-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:b6f6a8f45d0313db07d6d1d37bd0b112f887e1369758a5419c0370ba915b3871"}, - {file = "numpy-2.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f64641b42b2429f56ee08b4f427a4d2daf916ec59686061de751a55aafa22e4"}, - {file = "numpy-2.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a7039a136017eaa92c1848152827e1424701532ca8e8967fe480fe1569dae581"}, - {file = "numpy-2.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:46e161722e0f619749d1cd892167039015b2c2817296104487cd03ed4a955995"}, - {file = "numpy-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0e50842b2295ba8414c8c1d9d957083d5dfe9e16828b37de883f51fc53c4016f"}, - {file = "numpy-2.0.0-cp311-cp311-win32.whl", hash = "sha256:2ce46fd0b8a0c947ae047d222f7136fc4d55538741373107574271bc00e20e8f"}, - {file = "numpy-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:fbd6acc766814ea6443628f4e6751d0da6593dae29c08c0b2606164db026970c"}, - {file = "numpy-2.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:354f373279768fa5a584bac997de6a6c9bc535c482592d7a813bb0c09be6c76f"}, - {file = "numpy-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4d2f62e55a4cd9c58c1d9a1c9edaedcd857a73cb6fda875bf79093f9d9086f85"}, - 
{file = "numpy-2.0.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:1e72728e7501a450288fc8e1f9ebc73d90cfd4671ebbd631f3e7857c39bd16f2"}, - {file = "numpy-2.0.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:84554fc53daa8f6abf8e8a66e076aff6ece62de68523d9f665f32d2fc50fd66e"}, - {file = "numpy-2.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c73aafd1afca80afecb22718f8700b40ac7cab927b8abab3c3e337d70e10e5a2"}, - {file = "numpy-2.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49d9f7d256fbc804391a7f72d4a617302b1afac1112fac19b6c6cec63fe7fe8a"}, - {file = "numpy-2.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0ec84b9ba0654f3b962802edc91424331f423dcf5d5f926676e0150789cb3d95"}, - {file = "numpy-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:feff59f27338135776f6d4e2ec7aeeac5d5f7a08a83e80869121ef8164b74af9"}, - {file = "numpy-2.0.0-cp312-cp312-win32.whl", hash = "sha256:c5a59996dc61835133b56a32ebe4ef3740ea5bc19b3983ac60cc32be5a665d54"}, - {file = "numpy-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:a356364941fb0593bb899a1076b92dfa2029f6f5b8ba88a14fd0984aaf76d0df"}, - {file = "numpy-2.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e61155fae27570692ad1d327e81c6cf27d535a5d7ef97648a17d922224b216de"}, - {file = "numpy-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4554eb96f0fd263041baf16cf0881b3f5dafae7a59b1049acb9540c4d57bc8cb"}, - {file = "numpy-2.0.0-cp39-cp39-macosx_14_0_arm64.whl", hash = "sha256:903703372d46bce88b6920a0cd86c3ad82dae2dbef157b5fc01b70ea1cfc430f"}, - {file = "numpy-2.0.0-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:3e8e01233d57639b2e30966c63d36fcea099d17c53bf424d77f088b0f4babd86"}, - {file = "numpy-2.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cde1753efe513705a0c6d28f5884e22bdc30438bf0085c5c486cdaff40cd67a"}, - {file = "numpy-2.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:821eedb7165ead9eebdb569986968b541f9908979c2da8a4967ecac4439bae3d"}, - {file = "numpy-2.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9a1712c015831da583b21c5bfe15e8684137097969c6d22e8316ba66b5baabe4"}, - {file = "numpy-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9c27f0946a3536403efb0e1c28def1ae6730a72cd0d5878db38824855e3afc44"}, - {file = "numpy-2.0.0-cp39-cp39-win32.whl", hash = "sha256:63b92c512d9dbcc37f9d81b123dec99fdb318ba38c8059afc78086fe73820275"}, - {file = "numpy-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:3f6bed7f840d44c08ebdb73b1825282b801799e325bcbdfa6bc5c370e5aecc65"}, - {file = "numpy-2.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9416a5c2e92ace094e9f0082c5fd473502c91651fb896bc17690d6fc475128d6"}, - {file = "numpy-2.0.0-pp39-pypy39_pp73-macosx_14_0_x86_64.whl", hash = "sha256:17067d097ed036636fa79f6a869ac26df7db1ba22039d962422506640314933a"}, - {file = "numpy-2.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38ecb5b0582cd125f67a629072fed6f83562d9dd04d7e03256c9829bdec027ad"}, - {file = "numpy-2.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cef04d068f5fb0518a77857953193b6bb94809a806bd0a14983a8f12ada060c9"}, - {file = "numpy-2.0.0.tar.gz", hash = "sha256:cf5d1c9e6837f8af9f92b6bd3e86d513cdc11f60fd62185cc49ec7d1aba34864"}, + {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, + {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, + {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, + {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"}, + {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"}, + {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, + {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, + {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"}, + {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"}, + {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"}, + {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, ] [[package]] @@ -2791,4 +2782,4 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", [metadata] lock-version = "2.0" python-versions = ">=3.9" -content-hash = "771c4cb2f939a83496bb8c8aa21f6910771064f20240261dbd0fd66f2e9341b3" +content-hash = "70fd1f1ba91fcd4a9d6b931ac4c8bfb2211e515c611cb5389641aa73edece6ea" diff --git a/pyproject.toml b/pyproject.toml index 9750acee..3d82d69d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "awpy" -version = "2.0.0a9" +version = "2.0.0b1" description = "Counter-Strike 2 demo parsing, analysis and visualization" readme = "README.md" authors = [ @@ -32,7 +32,7 @@ python = ">=3.9" click = ">=8.1.7" loguru = ">=0.7.2" matplotlib = ">=3.9.0" -numpy = ">=2.0.0" +numpy = "^1.26.4" pandas = ">=2.2.2" setuptools = ">=70.1.0" demoparser2 = ">=0.26.2" diff --git a/tests/test_cli.py b/tests/test_cli.py index 2c2660cc..0a0336af 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -16,7 +16,7 @@ class TestCommandLine: @pytest.fixture(autouse=True) def setup_runner(self, setup): # noqa: ANN001, ARG002, PT004 - """Setup CLI runner.""" + """Setup CLI runner. 
`setup` arg is the pytest setup fixture.""" self.runner = CliRunner() def test_parse_invalid_filepath(self): diff --git a/tests/test_demo.py b/tests/test_demo.py index 09f1ef9a..4377766d 100644 --- a/tests/test_demo.py +++ b/tests/test_demo.py @@ -10,6 +10,18 @@ from awpy.demo import Demo +@pytest.fixture() +def parsed_hltv_demo(): + """Fixture that returns a parsed Demo object.""" + return Demo(path="tests/spirit-vs-mouz-m1-vertigo.dem") + + +@pytest.fixture() +def parsed_hltv_demo_no_rounds(): + """Fixture that returns a parsed Demo object with rounds disabled.""" + return Demo(path="tests/spirit-vs-mouz-m1-vertigo.dem", rounds=False) + + class TestDemo: """Tests the Demo object.""" @@ -18,28 +30,25 @@ def test_invalid_filepath(self): with pytest.raises(FileNotFoundError): Demo("xyz.dem") - def test_hltv_demo(self): + def test_hltv_demo(self, parsed_hltv_demo: Demo): """Test the Demo object with an HLTV demo.""" - parsed_demo = Demo(path="tests/spirit-vs-mouz-m1-vertigo.dem") - assert parsed_demo.header["map_name"] == "de_vertigo" + assert parsed_hltv_demo.header["map_name"] == "de_vertigo" - def test_no_rounds(self): + def test_no_rounds(self, parsed_hltv_demo_no_rounds: Demo): """Test that when you do not parse rounds, there are no top-level dataframes.""" - parsed_demo = Demo(path="tests/spirit-vs-mouz-m1-vertigo.dem", rounds=False) - assert parsed_demo.rounds is None - assert parsed_demo.kills is None - assert parsed_demo.damages is None - assert parsed_demo.bomb is None - assert parsed_demo.smokes is None - assert parsed_demo.infernos is None - assert parsed_demo.weapon_fires is None - assert parsed_demo.rounds is None - assert parsed_demo.grenades is None - - def test_compress(self): + assert parsed_hltv_demo_no_rounds.rounds is None + assert parsed_hltv_demo_no_rounds.kills is None + assert parsed_hltv_demo_no_rounds.damages is None + assert parsed_hltv_demo_no_rounds.bomb is None + assert parsed_hltv_demo_no_rounds.smokes is None + assert parsed_hltv_demo_no_rounds.infernos is None + assert parsed_hltv_demo_no_rounds.weapon_fires is None + assert parsed_hltv_demo_no_rounds.rounds is None + assert parsed_hltv_demo_no_rounds.grenades is None + + def test_compress(self, parsed_hltv_demo: Demo): """Test that the demo is zipped.""" - demo = Demo(path="tests/spirit-vs-mouz-m1-vertigo.dem") - demo.compress() + parsed_hltv_demo.compress() zip_name = "spirit-vs-mouz-m1-vertigo.zip" assert os.path.exists(zip_name) @@ -74,10 +83,9 @@ def test_compress(self): header = json.load(f) assert header["map_name"] == "de_vertigo" - def test_compress_no_rounds(self): + def test_compress_no_rounds(self, parsed_hltv_demo_no_rounds: Demo): """Test that the demo is zipped and no top-level dataframes are generated.""" - demo = Demo(path="tests/spirit-vs-mouz-m1-vertigo.dem", rounds=False) - demo.compress() + parsed_hltv_demo_no_rounds.compress() zip_name = "spirit-vs-mouz-m1-vertigo.zip" assert os.path.exists(zip_name) diff --git a/tests/test_parsers.py b/tests/test_parsers.py index 90fbd297..c335adb3 100644 --- a/tests/test_parsers.py +++ b/tests/test_parsers.py @@ -4,7 +4,9 @@ import pytest from demoparser2 import DemoParser -from awpy.parsers import parse_damages, parse_kills, parse_rounds, remove_nonplay_ticks +from awpy.parsers.events import parse_damages, parse_kills +from awpy.parsers.rounds import parse_rounds +from awpy.parsers.ticks import remove_nonplay_ticks @pytest.fixture(scope="class") @@ -168,9 +170,11 @@ def test_remove_nonplay_ticks(self, parsed_state: pd.DataFrame): assert "event1" in 
filtered_df["other_data"].to_numpy() assert "event2" in filtered_df["other_data"].to_numpy() - def test_hltv_rounds(self, hltv_parser: DemoParser): + def test_hltv_rounds( + self, hltv_parser: DemoParser, hltv_events: dict[str, pd.DataFrame] + ): """Tests that we can get correct rounds from HLTV demos.""" - hltv_rounds = parse_rounds(hltv_parser) + hltv_rounds = parse_rounds(hltv_parser, hltv_events) assert hltv_rounds.reason.to_numpy().tolist() == [ "ct_killed", "ct_killed",