From 8124385f0d90207d362b3610e6a2973cda233b02 Mon Sep 17 00:00:00 2001 From: Shom770 Date: Wed, 7 Feb 2024 16:30:30 -0500 Subject: [PATCH 01/19] setup falconviz 2024 --- src/Teams.py | 1 - src/data/match_schedule.json | 769 ++++++++++++++++++ src/page_managers/__init__.py | 1 - .../alliance_selection_manager.py | 543 ------------- src/page_managers/custom_graphs_manager.py | 2 +- src/page_managers/event_manager.py | 123 +-- src/page_managers/match_manager.py | 480 +---------- src/page_managers/picklist_manager.py | 6 +- src/page_managers/team_manager.py | 213 +---- src/pages/5_Alliance_Selection.py | 80 -- src/utils/calculated_stats.py | 273 ++++--- src/utils/constants.py | 100 ++- src/utils/functions.py | 25 +- 13 files changed, 995 insertions(+), 1621 deletions(-) create mode 100644 src/data/match_schedule.json delete mode 100644 src/page_managers/alliance_selection_manager.py delete mode 100644 src/pages/5_Alliance_Selection.py diff --git a/src/Teams.py b/src/Teams.py index 8e14d20..9ea8dc8 100644 --- a/src/Teams.py +++ b/src/Teams.py @@ -69,4 +69,3 @@ team_number, type_of_graph=GraphType.POINT_CONTRIBUTIONS ) - diff --git a/src/data/match_schedule.json b/src/data/match_schedule.json new file mode 100644 index 0000000..f38ca2e --- /dev/null +++ b/src/data/match_schedule.json @@ -0,0 +1,769 @@ +[ + { + "match_key": "qm1", + "red_alliance": [ + 3373, + 5954, + 2998 + ], + "blue_alliance": [ + 612, + 2421, + 422 + ] + }, + { + "match_key": "qm2", + "red_alliance": [ + 8592, + 836, + 4286 + ], + "blue_alliance": [ + 9684, + 2199, + 6863 + ] + }, + { + "match_key": "qm3", + "red_alliance": [ + 4099, + 2534, + 1599 + ], + "blue_alliance": [ + 617, + 539, + 8590 + ] + }, + { + "match_key": "qm4", + "red_alliance": [ + 339, + 8326, + 1895 + ], + "blue_alliance": [ + 620, + 540, + 1731 + ] + }, + { + "match_key": "qm5", + "red_alliance": [ + 9709, + 2106, + 1522 + ], + "blue_alliance": [ + 5587, + 4505, + 3136 + ] + }, + { + "match_key": "qm6", + "red_alliance": [ + 1731, + 1599, + 8592 + ], + "blue_alliance": [ + 6863, + 8590, + 9709 + ] + }, + { + "match_key": "qm7", + "red_alliance": [ + 422, + 2106, + 4505 + ], + "blue_alliance": [ + 339, + 1895, + 2421 + ] + }, + { + "match_key": "qm8", + "red_alliance": [ + 836, + 617, + 3136 + ], + "blue_alliance": [ + 620, + 9684, + 5954 + ] + }, + { + "match_key": "qm9", + "red_alliance": [ + 4099, + 5587, + 1522 + ], + "blue_alliance": [ + 612, + 539, + 540 + ] + }, + { + "match_key": "qm10", + "red_alliance": [ + 2998, + 8326, + 2534 + ], + "blue_alliance": [ + 2199, + 3373, + 4286 + ] + }, + { + "match_key": "qm11", + "red_alliance": [ + 8590, + 2199, + 540 + ], + "blue_alliance": [ + 1731, + 422, + 620 + ] + }, + { + "match_key": "qm12", + "red_alliance": [ + 339, + 836, + 539 + ], + "blue_alliance": [ + 612, + 2421, + 2106 + ] + }, + { + "match_key": "qm13", + "red_alliance": [ + 1599, + 4286, + 2998 + ], + "blue_alliance": [ + 9684, + 6863, + 8326 + ] + }, + { + "match_key": "qm14", + "red_alliance": [ + 617, + 9709, + 1522 + ], + "blue_alliance": [ + 4099, + 2534, + 8592 + ] + }, + { + "match_key": "qm15", + "red_alliance": [ + 4505, + 5954, + 5587 + ], + "blue_alliance": [ + 3373, + 1895, + 3136 + ] + }, + { + "match_key": "qm16", + "red_alliance": [ + 8592, + 1522, + 1599 + ], + "blue_alliance": [ + 4099, + 5954, + 6863 + ] + }, + { + "match_key": "qm17", + "red_alliance": [ + 1731, + 620, + 2998 + ], + "blue_alliance": [ + 3136, + 2106, + 2199 + ] + }, + { + "match_key": "qm18", + "red_alliance": [ + 612, + 9709, + 422 + ], + "blue_alliance": 
[ + 539, + 836, + 8326 + ] + }, + { + "match_key": "qm19", + "red_alliance": [ + 617, + 8590, + 540 + ], + "blue_alliance": [ + 4286, + 1895, + 9684 + ] + }, + { + "match_key": "qm20", + "red_alliance": [ + 2534, + 2421, + 339 + ], + "blue_alliance": [ + 3373, + 5587, + 4505 + ] + }, + { + "match_key": "qm21", + "red_alliance": [ + 540, + 2106, + 2534 + ], + "blue_alliance": [ + 2421, + 1522, + 617 + ] + }, + { + "match_key": "qm22", + "red_alliance": [ + 3373, + 8590, + 1895 + ], + "blue_alliance": [ + 620, + 3136, + 9684 + ] + }, + { + "match_key": "qm23", + "red_alliance": [ + 539, + 6863, + 8326 + ], + "blue_alliance": [ + 4099, + 8592, + 836 + ] + }, + { + "match_key": "qm24", + "red_alliance": [ + 1599, + 339, + 4505 + ], + "blue_alliance": [ + 1731, + 9709, + 2998 + ] + }, + { + "match_key": "qm25", + "red_alliance": [ + 612, + 4286, + 5954 + ], + "blue_alliance": [ + 422, + 2199, + 5587 + ] + }, + { + "match_key": "qm26", + "red_alliance": [ + 836, + 1522, + 5954 + ], + "blue_alliance": [ + 1731, + 8590, + 612 + ] + }, + { + "match_key": "qm27", + "red_alliance": [ + 8592, + 4505, + 9709 + ], + "blue_alliance": [ + 3136, + 1599, + 339 + ] + }, + { + "match_key": "qm28", + "red_alliance": [ + 4099, + 2421, + 422 + ], + "blue_alliance": [ + 540, + 2106, + 1895 + ] + }, + { + "match_key": "qm29", + "red_alliance": [ + 9684, + 8326, + 2998 + ], + "blue_alliance": [ + 2534, + 4286, + 539 + ] + }, + { + "match_key": "qm30", + "red_alliance": [ + 2199, + 5587, + 620 + ], + "blue_alliance": [ + 3373, + 617, + 6863 + ] + }, + { + "match_key": "qm31", + "red_alliance": [ + 9709, + 1599, + 6863 + ], + "blue_alliance": [ + 422, + 2534, + 8592 + ] + }, + { + "match_key": "qm32", + "red_alliance": [ + 2998, + 2106, + 9684 + ], + "blue_alliance": [ + 8326, + 836, + 4505 + ] + }, + { + "match_key": "qm33", + "red_alliance": [ + 5587, + 3373, + 1895 + ], + "blue_alliance": [ + 539, + 620, + 2199 + ] + }, + { + "match_key": "qm34", + "red_alliance": [ + 4099, + 1522, + 4286 + ], + "blue_alliance": [ + 617, + 5954, + 2421 + ] + }, + { + "match_key": "qm35", + "red_alliance": [ + 612, + 8590, + 540 + ], + "blue_alliance": [ + 339, + 3136, + 1731 + ] + }, + { + "match_key": "qm36", + "red_alliance": [ + 2106, + 5587, + 3136 + ], + "blue_alliance": [ + 8592, + 4505, + 1599 + ] + }, + { + "match_key": "qm37", + "red_alliance": [ + 1895, + 2998, + 1522 + ], + "blue_alliance": [ + 5954, + 9709, + 8590 + ] + }, + { + "match_key": "qm38", + "red_alliance": [ + 2199, + 4286, + 1731 + ], + "blue_alliance": [ + 540, + 612, + 6863 + ] + }, + { + "match_key": "qm39", + "red_alliance": [ + 422, + 4099, + 539 + ], + "blue_alliance": [ + 836, + 8326, + 9684 + ] + }, + { + "match_key": "qm40", + "red_alliance": [ + 2421, + 2534, + 620 + ], + "blue_alliance": [ + 339, + 617, + 3373 + ] + }, + { + "match_key": "qm41", + "red_alliance": [ + 2421, + 2106, + 3136 + ], + "blue_alliance": [ + 2199, + 540, + 339 + ] + }, + { + "match_key": "qm42", + "red_alliance": [ + 836, + 2998, + 3373 + ], + "blue_alliance": [ + 5587, + 5954, + 4505 + ] + }, + { + "match_key": "qm43", + "red_alliance": [ + 422, + 9684, + 1731 + ], + "blue_alliance": [ + 1522, + 1895, + 1599 + ] + }, + { + "match_key": "qm44", + "red_alliance": [ + 8326, + 612, + 617 + ], + "blue_alliance": [ + 4286, + 620, + 539 + ] + }, + { + "match_key": "qm45", + "red_alliance": [ + 9709, + 8592, + 4099 + ], + "blue_alliance": [ + 2534, + 8590, + 6863 + ] + }, + { + "match_key": "qm46", + "red_alliance": [ + 2998, + 1599, + 8592 + ], + "blue_alliance": [ + 2106, + 
2199, + 8590 + ] + }, + { + "match_key": "qm47", + "red_alliance": [ + 339, + 2534, + 3136 + ], + "blue_alliance": [ + 5954, + 6863, + 4286 + ] + }, + { + "match_key": "qm48", + "red_alliance": [ + 1895, + 540, + 836 + ], + "blue_alliance": [ + 422, + 1522, + 620 + ] + }, + { + "match_key": "qm49", + "red_alliance": [ + 3373, + 4505, + 9709 + ], + "blue_alliance": [ + 612, + 8326, + 5587 + ] + }, + { + "match_key": "qm50", + "red_alliance": [ + 1731, + 617, + 2421 + ], + "blue_alliance": [ + 9684, + 4099, + 539 + ] + }, + { + "match_key": "qm51", + "red_alliance": [ + 617, + 2998, + 8592 + ], + "blue_alliance": [ + 540, + 5954, + 9709 + ] + }, + { + "match_key": "qm52", + "red_alliance": [ + 9684, + 4099, + 620 + ], + "blue_alliance": [ + 1522, + 4505, + 5587 + ] + }, + { + "match_key": "qm53", + "red_alliance": [ + 8590, + 4286, + 339 + ], + "blue_alliance": [ + 836, + 2421, + 1599 + ] + }, + { + "match_key": "qm54", + "red_alliance": [ + 1895, + 2106, + 8326 + ], + "blue_alliance": [ + 6863, + 539, + 3373 + ] + }, + { + "match_key": "qm55", + "red_alliance": [ + 2199, + 1731, + 422 + ], + "blue_alliance": [ + 612, + 3136, + 2534 + ] + }, + { + "match_key": "qm56", + "red_alliance": [ + 4505, + 8592, + 4099 + ], + "blue_alliance": [ + 5587, + 620, + 836 + ] + }, + { + "match_key": "qm57", + "red_alliance": [ + 2998, + 1895, + 4286 + ], + "blue_alliance": [ + 5954, + 2106, + 422 + ] + }, + { + "match_key": "qm58", + "red_alliance": [ + 3136, + 540, + 617 + ], + "blue_alliance": [ + 539, + 8590, + 8326 + ] + }, + { + "match_key": "qm59", + "red_alliance": [ + 612, + 2534, + 339 + ], + "blue_alliance": [ + 2199, + 3373, + 2421 + ] + } +] diff --git a/src/page_managers/__init__.py b/src/page_managers/__init__.py index 1dff197..5066ee2 100644 --- a/src/page_managers/__init__.py +++ b/src/page_managers/__init__.py @@ -3,4 +3,3 @@ from .match_manager import MatchManager from .picklist_manager import PicklistManager from .team_manager import TeamManager -from .alliance_selection_manager import AllianceSelectionManager diff --git a/src/page_managers/alliance_selection_manager.py b/src/page_managers/alliance_selection_manager.py deleted file mode 100644 index 596cfc0..0000000 --- a/src/page_managers/alliance_selection_manager.py +++ /dev/null @@ -1,543 +0,0 @@ -"""Creates the `MatchManager` class used to set up the Match page and its graphs.""" - -import numpy as np -import streamlit as st -from scipy.integrate import quad -from scipy.stats import norm - -from .page_manager import PageManager -from utils import ( - alliance_breakdown, - bar_graph, - box_plot, - CalculatedStats, - colored_metric, - Criteria, - GeneralConstants, - GraphType, - multi_line_graph, - plotly_chart, - populate_missing_data, - Queries, - retrieve_match_schedule, - retrieve_pit_scouting_data, - retrieve_team_list, - retrieve_scouting_data, - scouting_data_for_team, - stacked_bar_graph, - win_percentages, -) - - -class AllianceSelectionManager(PageManager): - """The page manager for the `Alliance Selection` page.""" - - def __init__(self): - self.calculated_stats = CalculatedStats(retrieve_scouting_data()) - self.pit_scouting_data = retrieve_pit_scouting_data() - - def generate_input_section(self) -> list[list, list]: - """Creates the input section for the `Alliance Selection` page. - - Creates 3 dropdowns to choose teams - - :return: List with 3 choices - """ - team_list = retrieve_team_list() - - # Create the different dropdowns to choose the three teams for Red Alliance. 
- team_1_col, team_2_col, team_3_col = st.columns(3) - team_1 = team_1_col.selectbox( - "Team 1", - team_list, - index=0 - ) - team_2 = team_2_col.selectbox( - "Team 2", - team_list, - index=1 - ) - team_3 = team_3_col.selectbox( - "Team 3", - team_list, - index=2 - ) - - return [team_1, team_2, team_3] - - def generate_alliance_dashboard(self, team_numbers: list[int], color_gradient: list[str]) -> None: - """Generates an alliance dashboard in the `Match` page. - - :param team_numbers: The teams to generate the alliance dashboard for. - :param color_gradient: The color gradient to use for graphs, depending on the alliance. - :return: - """ - fastest_cycler_col, second_fastest_cycler_col, slowest_cycler_col = st.columns(3) - - fastest_cyclers = sorted( - { - team: self.calculated_stats.driving_index(team) for team in team_numbers - }.items(), - key=lambda pair: pair[1], - reverse=True - ) - - # Colored metric displaying the fastest cycler in the alliance - with fastest_cycler_col: - colored_metric( - "Fastest Cycler", - fastest_cyclers[0][0], - background_color=color_gradient[0], - opacity=0.4, - border_opacity=0.9 - ) - - # Colored metric displaying the second fastest cycler in the alliance - with second_fastest_cycler_col: - colored_metric( - "Second Fastest Cycler", - fastest_cyclers[1][0], - background_color=color_gradient[1], - opacity=0.4, - border_opacity=0.9 - ) - - # Colored metric displaying the slowest cycler in the alliance - with slowest_cycler_col: - colored_metric( - "Slowest Cycler", - fastest_cyclers[2][0], - background_color=color_gradient[2], - opacity=0.4, - border_opacity=0.9 - ) - - def generate_drivetrain_dashboard(self, team_numbers: list[int], color_gradient: list[str]) -> None: - """Generates an drivetrain dashboard in the `Alliance Selection` page. - - :param team_numbers: The teams to generate the drivetrain dashboard for. - :param color_gradient: The color gradient to use for graphs. - :return: - """ - - team1_col, team2_col, team3_col = st.columns(3) - - drivetrain_data = [ - self.pit_scouting_data[ - self.pit_scouting_data["Team Number"] == team - ].iloc[0]["Drivetrain"] - for team in team_numbers] - - # Colored metric displaying the fastest cycler in the alliance - with team1_col: - colored_metric( - "Team " + str(team_numbers[0]) + " Drivetrain:", - drivetrain_data[0], - background_color=color_gradient[0], - opacity=0.4, - border_opacity=0.9 - ) - - # Colored metric displaying the second fastest cycler in the alliance - with team2_col: - colored_metric( - "Team " + str(team_numbers[1]) + " Drivetrain:", - drivetrain_data[1], - background_color=color_gradient[1], - opacity=0.4, - border_opacity=0.9 - ) - - # Colored metric displaying the slowest cycler in the alliance - with team3_col: - colored_metric( - "Team " + str(team_numbers[2]) + " Drivetrain:", - drivetrain_data[2], - background_color=color_gradient[2], - opacity=0.4, - border_opacity=0.9 - ) - - def generate_autonomous_graphs( - self, - team_numbers: list[int], - type_of_graph: str, - color_gradient: list[str] - ) -> None: - """Generates the autonomous graphs for the `Match` page. - - :param team_numbers: The teams to generate the graphs for. - :param type_of_graph: The type of graph to make (cycle contributions/point contributions). - :param color_gradient: The color gradient to use for graphs, depending on the alliance. 
- :return: - """ - display_cycle_contributions = type_of_graph == GraphType.CYCLE_CONTRIBUTIONS - - auto_configuration_col, auto_engage_stats_col = st.columns(2) - auto_cycle_distribution_col, auto_cycles_over_time = st.columns(2) - - # Determine the best auto configuration for an alliance. - with auto_configuration_col: - average_auto_cycles_by_team = [ - ( - self.calculated_stats.cycles_by_match(team, Queries.AUTO_GRID) - if display_cycle_contributions - else self.calculated_stats.points_contributed_by_match(team, Queries.AUTO_GRID) - ).mean() - for team in team_numbers - ] - - plotly_chart( - bar_graph( - team_numbers, - average_auto_cycles_by_team, - x_axis_label="Teams", - y_axis_label=( - "Cycles in Auto" - if display_cycle_contributions - else "Points Scored in Auto" - ), - title="Average Auto Contribution", - color=color_gradient[1] - ) - ) - - # Determine the accuracy of teams when it comes to engaging onto the charge station - with auto_engage_stats_col: - successful_engages_by_team = [ - self.calculated_stats.cumulative_stat( - team, - Queries.AUTO_CHARGING_STATE, - Criteria.SUCCESSFUL_ENGAGE_CRITERIA - ) - for team in team_numbers - ] - successful_docks_by_team = [ - self.calculated_stats.cumulative_stat( - team, - Queries.AUTO_CHARGING_STATE, - Criteria.SUCCESSFUL_DOCK_CRITERIA - ) - for team in team_numbers - ] - missed_attempts_by_team = [ - self.calculated_stats.cumulative_stat( - team, - Queries.AUTO_ENGAGE_ATTEMPTED, - Criteria.AUTO_ATTEMPT_CRITERIA - ) - successful_docks_by_team[idx] - successful_engages_by_team[idx] - for idx, team in enumerate(team_numbers) - ] - - plotly_chart( - stacked_bar_graph( - team_numbers, - [missed_attempts_by_team, successful_docks_by_team, successful_engages_by_team], - x_axis_label="Teams", - y_axis_label=["# of Missed Engages", "# of Docks", "# of Engages"], - y_axis_title="", - color_map=dict( - zip( - ["# of Missed Engages", "# of Docks", "# of Engages"], - color_gradient - ) - ), - title="Auto Engage Stats" - ) - ) - - # Box plot showing the distribution of cycles - with auto_cycle_distribution_col: - cycles_by_team = [ - ( - self.calculated_stats.cycles_by_match(team, Queries.AUTO_GRID) - if display_cycle_contributions - else self.calculated_stats.points_contributed_by_match(team, Queries.AUTO_GRID) - ) - for team in team_numbers - ] - - plotly_chart( - box_plot( - team_numbers, - cycles_by_team, - x_axis_label="Teams", - y_axis_label=( - "# of Cycles" - if display_cycle_contributions - else "Points Contributed" - ), - title=( - "Distribution of Auto Cycles" - if display_cycle_contributions - else "Distribution of Points Contributed During Auto" - ), - show_underlying_data=True, - color_sequence=color_gradient - ) - ) - - # Plot cycles over time - with auto_cycles_over_time: - cycles_by_team = [ - ( - self.calculated_stats.cycles_by_match(team, Queries.AUTO_GRID) - if display_cycle_contributions - else self.calculated_stats.points_contributed_by_match(team, Queries.AUTO_GRID) - ) - for team in team_numbers - ] - - plotly_chart( - multi_line_graph( - *populate_missing_data(cycles_by_team), - x_axis_label="Match Index", - y_axis_label=team_numbers, - y_axis_title=( - "# of Cycles" - if display_cycle_contributions - else "Points Contributed" - ), - title=( - "Auto Cycles Over Time" - if display_cycle_contributions - else "Points Contributed in Auto Over Time" - ), - color_map=dict(zip(team_numbers, color_gradient)) - ) - ) - - def generate_teleop_graphs( - self, - team_numbers: list[int], - type_of_graph: str, - color_gradient: list[str] 
- ) -> None: - """Generates the teleop graphs for the `Match` page. - - :param team_numbers: The teams to generate the graphs for. - :param type_of_graph: The type of graph to make (cycle contributions/point contributions). - :param color_gradient: The color gradient to use for graphs, depending on the alliance. - :return: - """ - display_cycle_contributions = type_of_graph == GraphType.CYCLE_CONTRIBUTIONS - - teleop_cycles_by_level_col, teleop_game_piece_breakdown_col = st.columns(2) - teleop_cycles_over_time_col, teleop_cycles_distribution_col = st.columns(2) - - # Graph the teleop cycles per team by level (High/Mid/Low) - with teleop_cycles_by_level_col: - cycles_by_height = [] - - for height in (Queries.HIGH, Queries.MID, Queries.LOW): - cycles_by_height.append([ - self.calculated_stats.average_cycles_for_height( - team, - Queries.TELEOP_GRID, - height - ) * (1 if display_cycle_contributions else Criteria.TELEOP_GRID_POINTAGE[height]) - for team in team_numbers - ]) - - plotly_chart( - stacked_bar_graph( - team_numbers, - cycles_by_height, - x_axis_label="Teams", - y_axis_label=["High", "Mid", "Low"], - y_axis_title="", - color_map=dict( - zip( - ["High", "Mid", "Low"], - GeneralConstants.LEVEL_GRADIENT - ) - ), - title=( - "Average Cycles by Height" - if display_cycle_contributions - else "Average Points Contributed by Height" - ) - ).update_layout(xaxis={"categoryorder": "total descending"}) - ) - - # Graph the breakdown of game pieces by each team - with teleop_game_piece_breakdown_col: - cones_scored_by_team = [ - self.calculated_stats.cycles_by_game_piece_per_match( - team, - Queries.TELEOP_GRID, - Queries.CONE - ).sum() - for team in team_numbers - ] - cubes_scored_by_team = [ - self.calculated_stats.cycles_by_game_piece_per_match( - team, - Queries.TELEOP_GRID, - Queries.CUBE - ).sum() - for team in team_numbers - ] - - plotly_chart( - stacked_bar_graph( - team_numbers, - [cones_scored_by_team, cubes_scored_by_team], - x_axis_label="Teams", - y_axis_label=["Total # of Cones Scored", "Total # of Cubes Scored"], - y_axis_title="", - color_map=dict( - zip( - ["Total # of Cones Scored", "Total # of Cubes Scored"], - [GeneralConstants.CONE_COLOR, GeneralConstants.CUBE_COLOR] - ) - ), - title="Game Piece Breakdown by Team" - ).update_layout(xaxis={"categoryorder": "total descending"}) - ) - - # Box plot showing the distribution of cycles - with teleop_cycles_distribution_col: - cycles_by_team = [ - ( - self.calculated_stats.cycles_by_match(team, Queries.TELEOP_GRID) - if display_cycle_contributions - else self.calculated_stats.points_contributed_by_match(team, Queries.TELEOP_GRID) - ) - for team in team_numbers - ] - - plotly_chart( - box_plot( - team_numbers, - cycles_by_team, - x_axis_label="Teams", - y_axis_label=( - "# of Cycles" - if display_cycle_contributions - else "Points Contributed" - ), - title=( - "Distribution of Teleop Cycles" - if display_cycle_contributions - else "Distribution of Points Contributed During Teleop" - ), - show_underlying_data=True, - color_sequence=color_gradient - ) - ) - - # Plot cycles over time - with teleop_cycles_over_time_col: - cycles_by_team = [ - ( - self.calculated_stats.cycles_by_match(team, Queries.TELEOP_GRID) - if display_cycle_contributions - else self.calculated_stats.points_contributed_by_match(team, Queries.TELEOP_GRID) - ) - for team in team_numbers - ] - - plotly_chart( - multi_line_graph( - *populate_missing_data(cycles_by_team), - x_axis_label="Match Index", - y_axis_label=team_numbers, - y_axis_title=( - "# of Cycles" - if 
display_cycle_contributions - else "Points Contributed" - ), - title=( - "Teleop Cycles Over Time" - if display_cycle_contributions - else "Points Contributed in Teleop Over Time" - ), - color_map=dict(zip(team_numbers, color_gradient)) - ) - ) - - def generate_rating_graphs( - self, - team_numbers: list[int], - color_gradient: list[str] - ) -> None: - """Generates the teleop graphs for the `Match` page. - - :param team_numbers: The teams to generate the graphs for. - :param type_of_graph: The type of graph to make (cycle contributions/point contributions). - :param color_gradient: The color gradient to use for graphs, depending on the alliance. - :return: - """ - - driver_rating_col, defense_rating_col = st.columns(2) - disables_col, drivetrain_width_col = st.columns(2) - - with driver_rating_col: - driver_ratings = [ - self.calculated_stats.average_driver_rating(team) for team in team_numbers - ] - - plotly_chart( - bar_graph( - team_numbers, - driver_ratings, - x_axis_label="Teams", - y_axis_label="Driver Rating", - title="Driver Rating", - color=color_gradient[1] - ) - ) - - with defense_rating_col: - defense_ratings = [ - self.calculated_stats.average_defense_rating(team) for team in team_numbers - ] - - plotly_chart( - bar_graph( - team_numbers, - defense_ratings, - x_axis_label="Teams", - y_axis_label="Defense Rating", - title="Defense Rating", - color=color_gradient[1] - ) - ) - - with disables_col: - - disables_by_team = [ - self.calculated_stats.disables_by_team(team) for team in team_numbers - ] - - plotly_chart( - multi_line_graph( - *populate_missing_data(disables_by_team), - x_axis_label="Match Index", - y_axis_label=team_numbers, - y_axis_title="Disabled", - title=( - "Disables Over Time" - ), - color_map=dict(zip(team_numbers, color_gradient)) - ) - ) - - with drivetrain_width_col: - drivetrain_widths = [ - self.calculated_stats.drivetrain_width_by_team(team) for team in team_numbers - ] - - plotly_chart( - bar_graph( - team_numbers, - drivetrain_widths, - x_axis_label="Teams", - y_axis_label="Drivetrain Width", - title="Drivetrain Width", - color=color_gradient[1] - ) - ) - - \ No newline at end of file diff --git a/src/page_managers/custom_graphs_manager.py b/src/page_managers/custom_graphs_manager.py index f685268..f3a7324 100644 --- a/src/page_managers/custom_graphs_manager.py +++ b/src/page_managers/custom_graphs_manager.py @@ -40,7 +40,7 @@ def generate_input_section(self) -> list[list, list, Callable, str]: names_to_methods = { name.replace("_", " ").capitalize(): method for name, method in inspect.getmembers(self.calculated_stats, predicate=inspect.ismethod) - if not name.startswith("__") + if not name.startswith("__") and "(ignore)" not in method.__doc__ } st.write("#### 📈 Data to Display") diff --git a/src/page_managers/event_manager.py b/src/page_managers/event_manager.py index 89b1d5f..c8326e3 100644 --- a/src/page_managers/event_manager.py +++ b/src/page_managers/event_manager.py @@ -63,7 +63,7 @@ def generate_event_breakdown(self) -> None: average_cycles_per_team = sorted( [ - self.calculated_stats.average_cycles(team, Queries.TELEOP_GRID) + self.calculated_stats.average_cycles(team, Queries.TELEOP) for team in retrieve_team_list() ], reverse=True @@ -105,124 +105,5 @@ def generate_event_graphs(self, type_of_graph: str) -> None: """ display_cycle_contributions = type_of_graph == GraphType.CYCLE_CONTRIBUTIONS teams = retrieve_team_list() - auto_cycles_col, teleop_cycles_col = st.columns(2, gap="large") - # Display event-wide graph surrounding each team and 
their cycle/point contributions in autonomous. - with auto_cycles_col: - variable_key = f"auto_cycles_col_{type_of_graph}" - - auto_distributions = ( - self._retrieve_cycle_distributions(Queries.AUTO_GRID) - if display_cycle_contributions - else self._retrieve_point_distributions(Queries.AUTO_GRID) - ) - - auto_sorted_distributions = dict( - sorted( - zip(teams, auto_distributions), - key=lambda pair: (pair[1].median(), pair[1].mean()), - reverse=True - ) - ) - - auto_sorted_teams = list(auto_sorted_distributions.keys()) - auto_distributions = list(auto_sorted_distributions.values()) - - if not st.session_state.get(variable_key): - st.session_state[variable_key] = 0 - - plotly_chart( - box_plot( - auto_sorted_teams[ - st.session_state[variable_key]:st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY - ], - auto_distributions[ - st.session_state[variable_key]:st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY - ], - x_axis_label="Teams", - y_axis_label=f"{'Cycle' if display_cycle_contributions else 'Point'} Distribution", - title=f"{'Cycle' if display_cycle_contributions else 'Point'} Contributions in Autonomous" - ).update_layout( - showlegend=False - ) - ) - - previous_col, next_col = st.columns(2) - - if previous_col.button( - f"Previous {self.TEAMS_TO_SPLIT_BY} Teams", - use_container_width=True, - key=f"prevAuto{type_of_graph}", - disabled=(st.session_state[variable_key] - self.TEAMS_TO_SPLIT_BY < 0) - ): - st.session_state[variable_key] -= self.TEAMS_TO_SPLIT_BY - st.experimental_rerun() - - if next_col.button( - f"Next {self.TEAMS_TO_SPLIT_BY} Teams", - use_container_width=True, - key=f"nextAuto{type_of_graph}", - disabled=(st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY >= len(teams)) - ): - st.session_state[variable_key] += self.TEAMS_TO_SPLIT_BY - st.experimental_rerun() - - # Display event-wide graph surrounding each team and their cycle/point contributions in teleop. 
- with teleop_cycles_col: - variable_key = f"teleop_cycles_col_{type_of_graph}" - - teleop_distributions = ( - self._retrieve_cycle_distributions(Queries.TELEOP_GRID) - if display_cycle_contributions - else self._retrieve_point_distributions(Queries.TELEOP_GRID) - ) - - teleop_sorted_distributions = dict( - sorted( - zip(teams, teleop_distributions), - key=lambda pair: (pair[1].median(), pair[1].mean()), - reverse=True - ) - ) - - teleop_sorted_teams = list(teleop_sorted_distributions.keys()) - teleop_distributions = list(teleop_sorted_distributions.values()) - - if not st.session_state.get(variable_key): - st.session_state[variable_key] = 0 - - plotly_chart( - box_plot( - teleop_sorted_teams[ - st.session_state[variable_key]:st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY - ], - teleop_distributions[ - st.session_state[variable_key]:st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY - ], - x_axis_label="Teams", - y_axis_label=f"{'Cycle' if display_cycle_contributions else 'Point'} Distribution", - title=f"{'Cycle' if display_cycle_contributions else 'Point'} Contributions in Teleop" - ).update_layout( - showlegend=False - ) - ) - - previous_col, next_col = st.columns(2) - - if previous_col.button( - f"Previous {self.TEAMS_TO_SPLIT_BY} Teams", - use_container_width=True, - key=f"prevTele{type_of_graph}", - disabled=(st.session_state[variable_key] - self.TEAMS_TO_SPLIT_BY < 0) - ): - st.session_state[variable_key] -= self.TEAMS_TO_SPLIT_BY - st.experimental_rerun() - - if next_col.button( - f"Next {self.TEAMS_TO_SPLIT_BY} Teams", - use_container_width=True, - key=f"nextTele{type_of_graph}", - disabled=(st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY >= len(teams)) - ): - st.session_state[variable_key] += self.TEAMS_TO_SPLIT_BY - st.experimental_rerun() + # TODO: Add event graphs \ No newline at end of file diff --git a/src/page_managers/match_manager.py b/src/page_managers/match_manager.py index ce76715..8287fc6 100644 --- a/src/page_managers/match_manager.py +++ b/src/page_managers/match_manager.py @@ -253,12 +253,12 @@ def generate_match_prediction_dashboard( [ ( team, - average_points_contributed[idx], - scouting_data_for_team(team)["DriverRating"].mean(), + self.calculated_stats.average_driver_rating(team), + self.calculated_stats.average_counter_defense_skill(team) ) for idx, team in enumerate(red_alliance) ], - key=lambda info: (info[1] / info[2], 5 - info[2]), + key=lambda info: info[1] / info[2], )[-1][0] alliance_breakdown( @@ -277,12 +277,12 @@ def generate_match_prediction_dashboard( [ ( team, - average_points_contributed[idx], - scouting_data_for_team(team)["DriverRating"].mean(), + self.calculated_stats.average_driver_rating(team), + self.calculated_stats.average_counter_defense_skill(team) ) for idx, team in enumerate(blue_alliance) ], - key=lambda info: (info[1] / info[2], 5 - info[2]), + key=lambda info: info[1] / info[2], )[-1][0] alliance_breakdown( @@ -304,141 +304,7 @@ def generate_match_prediction_graphs( combined_teams = red_alliance + blue_alliance display_cycle_contributions = type_of_graph == GraphType.CYCLE_CONTRIBUTIONS color_sequence = ["#781212", "#163ba1"] # Bright red # Bright blue - - game_piece_breakdown_col, auto_cycles_col = st.columns(2) - teleop_cycles_col, cumulative_cycles_col = st.columns(2) - - # Breaks down game pieces between cones/cubes among the six teams - with game_piece_breakdown_col: - game_piece_breakdown = [ - [ - self.calculated_stats.cycles_by_game_piece_per_match( - team, Queries.TELEOP_GRID, game_piece - ).sum() - 
for team in combined_teams - ] - for game_piece in (Queries.CONE, Queries.CUBE) - ] - - plotly_chart( - stacked_bar_graph( - combined_teams, - game_piece_breakdown, - "Teams", - ["Total # of Cones Scored", "Total # of Cubes Scored"], - "Total Game Pieces Scored", - title="Game Piece Breakdown", - color_map={ - "Total # of Cones Scored": GeneralConstants.CONE_COLOR, # Cone color - "Total # of Cubes Scored": GeneralConstants.CUBE_COLOR, # Cube color - }, - ).update_layout(xaxis={"categoryorder": "total descending"}) - ) - - # Breaks down cycles/point contributions among both alliances in Autonomous. - with auto_cycles_col: - auto_alliance_distributions = [] - - for alliance in (red_alliance, blue_alliance): - cycles_in_alliance = [ - ( - self.calculated_stats.cycles_by_match(team, Queries.AUTO_GRID) - if display_cycle_contributions - else self.calculated_stats.points_contributed_by_match( - team, Queries.AUTO_GRID - ) - ) - for team in alliance - ] - auto_alliance_distributions.append( - self.calculated_stats.cartesian_product( - *cycles_in_alliance, reduce_with_sum=True - ) - ) - - plotly_chart( - box_plot( - ["Red Alliance", "Blue Alliance"], - auto_alliance_distributions, - y_axis_label=( - "Cycles" - if display_cycle_contributions - else "Points Contributed" - ), - title=( - f"Cycles During Autonomous (N={len(auto_alliance_distributions[0])})" - if display_cycle_contributions - else f"Points Contributed During Autonomous (N={len(auto_alliance_distributions[0])})" - ), - color_sequence=color_sequence, - ) - ) - - # Breaks down cycles/point contributions among both alliances in Teleop. - with teleop_cycles_col: - teleop_alliance_distributions = [] - - for alliance in (red_alliance, blue_alliance): - cycles_in_alliance = [ - ( - self.calculated_stats.cycles_by_match(team, Queries.TELEOP_GRID) - if display_cycle_contributions - else self.calculated_stats.points_contributed_by_match( - team, Queries.TELEOP_GRID - ) - ) - for team in alliance - ] - teleop_alliance_distributions.append( - self.calculated_stats.cartesian_product( - *cycles_in_alliance, reduce_with_sum=True - ) - ) - - plotly_chart( - box_plot( - ["Red Alliance", "Blue Alliance"], - teleop_alliance_distributions, - y_axis_label=( - "Cycles" - if display_cycle_contributions - else "Points Contributed" - ), - title=( - f"Cycles During Teleop (N={len(teleop_alliance_distributions[0])})" - if display_cycle_contributions - else f"Points Contributed During Teleop (N={len(teleop_alliance_distributions[0])})" - ), - color_sequence=color_sequence, - ) - ) - - # Show cumulative cycles/point contributions (auto and teleop) - with cumulative_cycles_col: - cumulative_alliance_distributions = [ - auto_distribution + teleop_distribution - for auto_distribution, teleop_distribution in zip( - auto_alliance_distributions, teleop_alliance_distributions - ) - ] - - plotly_chart( - box_plot( - ["Red Alliance", "Blue Alliance"], - cumulative_alliance_distributions, - y_axis_label=( - "Cycles" - if display_cycle_contributions - else "Points Contributed" - ), - title=( - f"Cycles During Auto + Teleop (N={len(cumulative_alliance_distributions[0])})" - if display_cycle_contributions - else f"Points Contributed During Auto + Teleop (N={len(cumulative_alliance_distributions[0])})" - ), - color_sequence=color_sequence, - ) - ) + # TODO: Add match prediction graphs def generate_alliance_dashboard(self, team_numbers: list[int], color_gradient: list[str]) -> None: """Generates an alliance dashboard in the `Match` page. 
@@ -447,30 +313,7 @@ def generate_alliance_dashboard(self, team_numbers: list[int], color_gradient: l :param color_gradient: The color gradient to use for graphs, depending on the alliance. :return: """ - if self.pit_scouting_data is not None: - fastest_cycler_col, second_fastest_cycler_col, slowest_cycler_col, tolerance_col = st.columns(4) - - # Colored metric that displays the tolerance when engaging on the charge station. - with tolerance_col: - total_width = 0 - - for team in team_numbers: - try: - total_width += self.pit_scouting_data[ - self.pit_scouting_data["Team Number"] == team - ].iloc[0]["Drivetrain Width"] / 12 - except IndexError: - print(f"{team} has no pit scouting data.") # For debugging purposes when looking at logs. - - colored_metric( - "Tolerance When Engaging (ft.)", - f"{GeneralConstants.CHARGE_STATION_LENGTH - total_width:.1f}", - background_color=color_gradient[3], - opacity=0.4, - border_opacity=0.9 - ) - else: - fastest_cycler_col, second_fastest_cycler_col, slowest_cycler_col = st.columns(3) + fastest_cycler_col, second_fastest_cycler_col, slowest_cycler_col = st.columns(3) fastest_cyclers = sorted( { @@ -525,177 +368,7 @@ def generate_autonomous_graphs( """ display_cycle_contributions = type_of_graph == GraphType.CYCLE_CONTRIBUTIONS - auto_configuration_col, auto_engage_stats_col = st.columns(2) - auto_cycle_distribution_col, auto_cycles_over_time = st.columns(2) - - # Determine the best auto configuration for an alliance. - with auto_configuration_col: - teams_sorted_by_point_contribution = dict( - sorted( - { - team: ( - self.calculated_stats.cycles_by_match(team, Queries.AUTO_GRID) - if display_cycle_contributions - else self.calculated_stats.points_contributed_by_match(team, Queries.AUTO_GRID) - ) - for team in team_numbers - }.items(), - key=lambda pair: pair[1].max(), - reverse=True - ) - ) - - # Y values of plot - points_by_grid = {} - full_grid = [Queries.LEFT, Queries.COOP, Queries.RIGHT] - grids_occupied = set() - - for team, point_contributions in teams_sorted_by_point_contribution.items(): - grid_placements = self.calculated_stats.classify_autos_by_match(team) - autos_sorted = sorted( - zip(point_contributions, grid_placements), - key=lambda pair: pair[0], - reverse=True - ) - - for auto_pointage, grid in autos_sorted: - if grid not in grids_occupied: - points_by_grid[team] = (auto_pointage, grid) - grids_occupied.add(grid) - break - else: - # Add a placeholder in the worst-case scenario - placeholder_grid = next(iter(set(full_grid).difference(grids_occupied))) - points_by_grid[team] = (point_contributions.max(), placeholder_grid) - grids_occupied.add(placeholder_grid) - - # Sort points by grid in order to go from left to right (left, coop, right). 
- points_by_grid = dict( - sorted( - points_by_grid.items(), - key=lambda pair: full_grid.index(pair[1][1]) - ) - ) - - plotly_chart( - bar_graph( - list(points_by_grid.keys()), - [value[0] for value in points_by_grid.values()], - x_axis_label="Teams (Left, Coop, Right)", - y_axis_label=( - "Cycles in Auto" - if display_cycle_contributions - else "Points Scored in Auto" - ), - title="Best Auto Configuration", - color=color_gradient[1] - ) - ) - - # Determine the accuracy of teams when it comes to engaging onto the charge station - with auto_engage_stats_col: - successful_engages_by_team = [ - self.calculated_stats.cumulative_stat( - team, - Queries.AUTO_CHARGING_STATE, - Criteria.SUCCESSFUL_ENGAGE_CRITERIA - ) - for team in team_numbers - ] - successful_docks_by_team = [ - self.calculated_stats.cumulative_stat( - team, - Queries.AUTO_CHARGING_STATE, - Criteria.SUCCESSFUL_DOCK_CRITERIA - ) - for team in team_numbers - ] - missed_attempts_by_team = [ - self.calculated_stats.cumulative_stat( - team, - Queries.AUTO_ENGAGE_ATTEMPTED, - Criteria.AUTO_ATTEMPT_CRITERIA - ) - successful_docks_by_team[idx] - successful_engages_by_team[idx] - for idx, team in enumerate(team_numbers) - ] - - plotly_chart( - stacked_bar_graph( - team_numbers, - [missed_attempts_by_team, successful_docks_by_team, successful_engages_by_team], - x_axis_label="Teams", - y_axis_label=["# of Missed Engages", "# of Docks", "# of Engages"], - y_axis_title="", - color_map=dict( - zip( - ["# of Missed Engages", "# of Docks", "# of Engages"], - color_gradient - ) - ), - title="Auto Engage Stats" - ) - ) - - # Box plot showing the distribution of cycles - with auto_cycle_distribution_col: - cycles_by_team = [ - ( - self.calculated_stats.cycles_by_match(team, Queries.AUTO_GRID) - if display_cycle_contributions - else self.calculated_stats.points_contributed_by_match(team, Queries.AUTO_GRID) - ) - for team in team_numbers - ] - - plotly_chart( - box_plot( - team_numbers, - cycles_by_team, - x_axis_label="Teams", - y_axis_label=( - "# of Cycles" - if display_cycle_contributions - else "Points Contributed" - ), - title=( - "Distribution of Auto Cycles" - if display_cycle_contributions - else "Distribution of Points Contributed During Auto" - ), - show_underlying_data=True, - color_sequence=color_gradient - ) - ) - - # Plot cycles over time - with auto_cycles_over_time: - cycles_by_team = [ - ( - self.calculated_stats.cycles_by_match(team, Queries.AUTO_GRID) - if display_cycle_contributions - else self.calculated_stats.points_contributed_by_match(team, Queries.AUTO_GRID) - ) - for team in team_numbers - ] - - plotly_chart( - multi_line_graph( - *populate_missing_data(cycles_by_team), - x_axis_label="Match Index", - y_axis_label=team_numbers, - y_axis_title=( - "# of Cycles" - if display_cycle_contributions - else "Points Contributed" - ), - title=( - "Auto Cycles Over Time" - if display_cycle_contributions - else "Points Contributed in Auto Over Time" - ), - color_map=dict(zip(team_numbers, color_gradient)) - ) - ) + # TODO: add auton graphs def generate_teleop_graphs( self, @@ -712,137 +385,4 @@ def generate_teleop_graphs( """ display_cycle_contributions = type_of_graph == GraphType.CYCLE_CONTRIBUTIONS - teleop_cycles_by_level_col, teleop_game_piece_breakdown_col = st.columns(2) - teleop_cycles_over_time_col, teleop_cycles_distribution_col = st.columns(2) - - # Graph the teleop cycles per team by level (High/Mid/Low) - with teleop_cycles_by_level_col: - cycles_by_height = [] - - for height in (Queries.HIGH, Queries.MID, Queries.LOW): 
- cycles_by_height.append([ - self.calculated_stats.average_cycles_for_height( - team, - Queries.TELEOP_GRID, - height - ) * (1 if display_cycle_contributions else Criteria.TELEOP_GRID_POINTAGE[height]) - for team in team_numbers - ]) - - plotly_chart( - stacked_bar_graph( - team_numbers, - cycles_by_height, - x_axis_label="Teams", - y_axis_label=["High", "Mid", "Low"], - y_axis_title="", - color_map=dict( - zip( - ["High", "Mid", "Low"], - GeneralConstants.LEVEL_GRADIENT - ) - ), - title=( - "Average Cycles by Height" - if display_cycle_contributions - else "Average Points Contributed by Height" - ) - ).update_layout(xaxis={"categoryorder": "total descending"}) - ) - - # Graph the breakdown of game pieces by each team - with teleop_game_piece_breakdown_col: - cones_scored_by_team = [ - self.calculated_stats.cycles_by_game_piece_per_match( - team, - Queries.TELEOP_GRID, - Queries.CONE - ).sum() - for team in team_numbers - ] - cubes_scored_by_team = [ - self.calculated_stats.cycles_by_game_piece_per_match( - team, - Queries.TELEOP_GRID, - Queries.CUBE - ).sum() - for team in team_numbers - ] - - plotly_chart( - stacked_bar_graph( - team_numbers, - [cones_scored_by_team, cubes_scored_by_team], - x_axis_label="Teams", - y_axis_label=["Total # of Cones Scored", "Total # of Cubes Scored"], - y_axis_title="", - color_map=dict( - zip( - ["Total # of Cones Scored", "Total # of Cubes Scored"], - [GeneralConstants.CONE_COLOR, GeneralConstants.CUBE_COLOR] - ) - ), - title="Game Piece Breakdown by Team" - ).update_layout(xaxis={"categoryorder": "total descending"}) - ) - - # Box plot showing the distribution of cycles - with teleop_cycles_distribution_col: - cycles_by_team = [ - ( - self.calculated_stats.cycles_by_match(team, Queries.TELEOP_GRID) - if display_cycle_contributions - else self.calculated_stats.points_contributed_by_match(team, Queries.TELEOP_GRID) - ) - for team in team_numbers - ] - - plotly_chart( - box_plot( - team_numbers, - cycles_by_team, - x_axis_label="Teams", - y_axis_label=( - "# of Cycles" - if display_cycle_contributions - else "Points Contributed" - ), - title=( - "Distribution of Teleop Cycles" - if display_cycle_contributions - else "Distribution of Points Contributed During Teleop" - ), - show_underlying_data=True, - color_sequence=color_gradient - ) - ) - - # Plot cycles over time - with teleop_cycles_over_time_col: - cycles_by_team = [ - ( - self.calculated_stats.cycles_by_match(team, Queries.TELEOP_GRID) - if display_cycle_contributions - else self.calculated_stats.points_contributed_by_match(team, Queries.TELEOP_GRID) - ) - for team in team_numbers - ] - - plotly_chart( - multi_line_graph( - *populate_missing_data(cycles_by_team), - x_axis_label="Match Index", - y_axis_label=team_numbers, - y_axis_title=( - "# of Cycles" - if display_cycle_contributions - else "Points Contributed" - ), - title=( - "Teleop Cycles Over Time" - if display_cycle_contributions - else "Points Contributed in Teleop Over Time" - ), - color_map=dict(zip(team_numbers, color_gradient)) - ) - ) + # TODO: add teleop graphs \ No newline at end of file diff --git a/src/page_managers/picklist_manager.py b/src/page_managers/picklist_manager.py index b456639..19f9ad4 100644 --- a/src/page_managers/picklist_manager.py +++ b/src/page_managers/picklist_manager.py @@ -23,13 +23,13 @@ def __init__(self): self.requested_stats = { "Average Auto Cycles": partial( self.calculated_stats.average_cycles, - type_of_grid=Queries.AUTO_GRID + mode=Queries.AUTO ), "Average Teleop Cycles": partial( 
self.calculated_stats.average_cycles, - type_of_grid=Queries.TELEOP_GRID + mode=Queries.TELEOP ) - } + } # TODO: Add more stats here later def generate_input_section(self) -> list[list, list]: """Creates the input section for the `Picklist` page. diff --git a/src/page_managers/team_manager.py b/src/page_managers/team_manager.py index ffb461b..9fe9da8 100644 --- a/src/page_managers/team_manager.py +++ b/src/page_managers/team_manager.py @@ -73,7 +73,7 @@ def generate_metrics(self, team_number: int) -> None: drivetrain = self.pit_scouting_data[ self.pit_scouting_data["Team Number"] == team_number ].iloc[0]["Drivetrain"].split("/")[0] # The splitting at / is used to shorten the drivetrain type. - except IndexError: + except (IndexError, TypeError): drivetrain = "—" colored_metric( @@ -87,11 +87,11 @@ def generate_metrics(self, team_number: int) -> None: with auto_cycle_col: average_auto_cycles = self.calculated_stats.average_cycles( team_number, - Queries.AUTO_GRID + Queries.AUTO ) auto_cycles_for_percentile = self.calculated_stats.quantile_stat( 0.5, - lambda self, team: self.average_cycles(team, Queries.AUTO_GRID) + lambda self, team: self.average_cycles(team, Queries.AUTO) ) colored_metric( "Average Auto Cycles", @@ -103,11 +103,11 @@ def generate_metrics(self, team_number: int) -> None: with teleop_cycle_col: average_teleop_cycles = self.calculated_stats.average_cycles( team_number, - Queries.TELEOP_GRID + Queries.TELEOP ) teleop_cycles_for_percentile = self.calculated_stats.quantile_stat( 0.5, - lambda self, team: self.average_cycles(team, Queries.TELEOP_GRID) + lambda self, team: self.average_cycles(team, Queries.TELEOP) ) colored_metric( "Average Teleop Cycles", @@ -135,62 +135,8 @@ def generate_metrics(self, team_number: int) -> None: invert_threshold=True ) - # Metric for total auto engage attempts - with auto_engage_col: - total_auto_engage_attempts = self.calculated_stats.cumulative_stat( - team_number, - Queries.AUTO_ENGAGE_ATTEMPTED, - Criteria.AUTO_ATTEMPT_CRITERIA - ) - auto_engage_attempts_for_percentile = self.calculated_stats.quantile_stat( - 0.5, - lambda self, team: self.cumulative_stat( - team, - Queries.AUTO_ENGAGE_ATTEMPTED, - Criteria.AUTO_ATTEMPT_CRITERIA - ) - ) - - colored_metric( - "Auto Engage Attempts", - total_auto_engage_attempts, - threshold=auto_engage_attempts_for_percentile - ) - - # Metric for auto engage accuracy - with auto_engage_accuracy_col: - total_successful_engages = self.calculated_stats.cumulative_stat( - team_number, - Queries.AUTO_CHARGING_STATE, - Criteria.SUCCESSFUL_ENGAGE_CRITERIA - ) - auto_engage_accuracy = ( - total_successful_engages / total_auto_engage_attempts - if total_auto_engage_attempts - else 0.0 - ) - - colored_metric( - "Auto Engage Accuracy", - auto_engage_accuracy, - threshold=0.75, - value_formatter=lambda value: f"{value:.1%}" - ) + # TODO: Add the other metrics - # Metric for average auto accuracy by match - with auto_accuracy_col: - average_auto_accuracy = self.calculated_stats.average_auto_accuracy(team_number) - auto_accuracy_for_percentile = self.calculated_stats.quantile_stat( - 0.5, - lambda self, team: self.average_auto_accuracy(team) - ) - - colored_metric( - "Average Auto Accuracy (%)", - average_auto_accuracy, - threshold=auto_accuracy_for_percentile, - value_formatter=lambda value: f"{value:.1%}" - ) def generate_autonomous_graphs( self, @@ -206,61 +152,7 @@ def generate_autonomous_graphs( team_data = scouting_data_for_team(team_number) using_cycle_contributions = type_of_graph == GraphType.CYCLE_CONTRIBUTIONS - 
auto_cycles_over_time_col, auto_engage_stats_col = st.columns(2) - - # Graph for auto cycles over time - with auto_cycles_over_time_col: - auto_cycles_over_time = ( - self.calculated_stats.cycles_by_match(team_number, Queries.AUTO_GRID) - if using_cycle_contributions - else self.calculated_stats.points_contributed_by_match(team_number, Queries.AUTO_GRID) - ) - - plotly_chart( - line_graph( - x=team_data[Queries.MATCH_KEY], - y=auto_cycles_over_time, - x_axis_label="Match Key", - y_axis_label=( - "# of Auto Cycles" - if using_cycle_contributions - else "Points Contributed" - ), - title=( - "Auto Cycles Over Time" - if using_cycle_contributions - else "Auto Points Contributed Over Time" - ) - ) - ) - - # Bar graph for displaying how successful a team is at their auto engaging. - with auto_engage_stats_col: - total_successful_engages = self.calculated_stats.cumulative_stat( - team_number, - Queries.AUTO_CHARGING_STATE, - Criteria.SUCCESSFUL_ENGAGE_CRITERIA - ) - total_successful_docks = self.calculated_stats.cumulative_stat( - team_number, - Queries.AUTO_CHARGING_STATE, - {"Dock": 1} - ) - total_missed_engages = self.calculated_stats.cumulative_stat( - team_number, - Queries.AUTO_ENGAGE_ATTEMPTED, - Criteria.AUTO_ATTEMPT_CRITERIA - ) - total_successful_engages - total_successful_docks - - plotly_chart( - bar_graph( - x=["# of Successful Engages", "# of Successful Docks", "# of Missed Engages"], - y=[total_successful_engages, total_successful_docks, total_missed_engages], - x_axis_label="", - y_axis_label="# of Occurences", - title="Auto Charge Station Statistics" - ) - ) + # TODO: Add autonomous graphs def generate_teleop_graphs( self, @@ -276,93 +168,4 @@ def generate_teleop_graphs( team_data = scouting_data_for_team(team_number) using_cycle_contributions = type_of_graph == GraphType.CYCLE_CONTRIBUTIONS - cycles_by_height_col, teleop_cycles_over_time_col, breakdown_cycles_col = st.columns(3) - - # Bar graph for displaying average # of cycles per height - with cycles_by_height_col: - cycles_for_low = self.calculated_stats.average_cycles_for_height( - team_number, - Queries.TELEOP_GRID, - Queries.LOW - ) * (1 if using_cycle_contributions else 2) - cycles_for_mid = self.calculated_stats.average_cycles_for_height( - team_number, - Queries.TELEOP_GRID, - Queries.MID - ) * (1 if using_cycle_contributions else 3) - cycles_for_high = self.calculated_stats.average_cycles_for_height( - team_number, - Queries.TELEOP_GRID, - Queries.HIGH - ) * (1 if using_cycle_contributions else 5) - - plotly_chart( - bar_graph( - x=["Hybrid Avr.", "Mid Avr.", "High Avr."], - y=[cycles_for_low, cycles_for_mid, cycles_for_high], - x_axis_label="Node Height", - y_axis_label=( - "Average # of Teleop Cycles" - if using_cycle_contributions - else "Average Pts. Contributed" - ), - title=( - "Average # of Teleop Cycles by Height" - if using_cycle_contributions - else "Average Pts. 
Contributed by Height" - ) - ) - ) - - # Graph for teleop cycles over time - with teleop_cycles_over_time_col: - teleop_cycles_over_time = ( - self.calculated_stats.cycles_by_match(team_number, Queries.TELEOP_GRID) - if using_cycle_contributions - else self.calculated_stats.points_contributed_by_match(team_number, Queries.TELEOP_GRID) - ) - - plotly_chart( - line_graph( - x=team_data[Queries.MATCH_KEY], - y=teleop_cycles_over_time, - x_axis_label="Match Key", - y_axis_label=( - "# of Teleop Cycles" - if using_cycle_contributions - else "Points Contributed" - ), - title=( - "Teleop Cycles Over Time" - if using_cycle_contributions - else "Teleop Points Contributed Over Time" - ) - ) - ) - - # Stacked bar graph displaying the breakdown of cones and cubes in Teleop - with breakdown_cycles_col: - total_cones_scored = self.calculated_stats.cycles_by_game_piece_per_match( - team_number, - Queries.TELEOP_GRID, - Queries.CONE - ).sum() - total_cubes_scored = self.calculated_stats.cycles_by_game_piece_per_match( - team_number, - Queries.TELEOP_GRID, - Queries.CUBE - ).sum() - - plotly_chart( - stacked_bar_graph( - x=[str(team_number)], - y=[[total_cones_scored], [total_cubes_scored]], - x_axis_label="Team Number", - y_axis_label=["Total # of Cones Scored", "Total # of Cubes Scored"], - title="Game Piece Breakdown", - color_map={ - "Total # of Cones Scored": GeneralConstants.CONE_COLOR, # Cone color - "Total # of Cubes Scored": GeneralConstants.CUBE_COLOR # Cube color - } - ) - ) + # TODO: Add teleop graphs diff --git a/src/pages/5_Alliance_Selection.py b/src/pages/5_Alliance_Selection.py deleted file mode 100644 index 3899caf..0000000 --- a/src/pages/5_Alliance_Selection.py +++ /dev/null @@ -1,80 +0,0 @@ -"""Creates the page for match-specific graphs in Streamlit, allowing the user to choose the teams..""" - -import streamlit as st - -from page_managers import AllianceSelectionManager -from utils import GeneralConstants, GraphType - -# Configuration for Streamlit -st.set_page_config( - layout="wide", - page_title="Alliance Selection", - page_icon="🤭", -) -alliance_selection_manager = AllianceSelectionManager() - -if __name__ == '__main__': - # Write the title of the page. - st.write("# Alliance Selection Dashboard") - - # Generate the input section of the `Alliance Selection` page. 
- teams_selected = alliance_selection_manager.generate_input_section() - - # Generate alliance dashboard - alliance_selection_manager.generate_alliance_dashboard( - teams_selected, - color_gradient=GeneralConstants.GOLD_GRADIENT - ) - - auto_tab, teleop_tab, rating_tab = st.tabs( - ["🤖 Autonomous", "🎮 Teleop", "📊 Ratings"] - ) - - with auto_tab: - auto_cycle_tab, auto_points_tab = st.tabs( - ["📈 Cycle Contribution Graphs", "🧮 Point Contribution Graphs"] - ) - - with auto_cycle_tab: - alliance_selection_manager.generate_autonomous_graphs( - teams_selected, - type_of_graph=GraphType.CYCLE_CONTRIBUTIONS, - color_gradient=GeneralConstants.GOLD_GRADIENT - ) - - with auto_points_tab: - alliance_selection_manager.generate_autonomous_graphs( - teams_selected, - type_of_graph=GraphType.POINT_CONTRIBUTIONS, - color_gradient=GeneralConstants.GOLD_GRADIENT - ) - - with teleop_tab: - alliance_selection_manager.generate_drivetrain_dashboard( - teams_selected, - color_gradient=GeneralConstants.GOLD_GRADIENT - ) - - teleop_cycle_tab, teleop_points_tab = st.tabs( - ["📈 Cycle Contribution Graphs", "🧮 Point Contribution Graphs"] - ) - - with teleop_cycle_tab: - alliance_selection_manager.generate_teleop_graphs( - teams_selected, - type_of_graph=GraphType.CYCLE_CONTRIBUTIONS, - color_gradient=GeneralConstants.GOLD_GRADIENT - ) - - with teleop_points_tab: - alliance_selection_manager.generate_teleop_graphs( - teams_selected, - type_of_graph=GraphType.POINT_CONTRIBUTIONS, - color_gradient=GeneralConstants.GOLD_GRADIENT - ) - - with rating_tab: - alliance_selection_manager.generate_rating_graphs( - teams_selected, - color_gradient=GeneralConstants.GOLD_GRADIENT - ) diff --git a/src/utils/calculated_stats.py b/src/utils/calculated_stats.py index 981b985..0e8d882 100644 --- a/src/utils/calculated_stats.py +++ b/src/utils/calculated_stats.py @@ -1,10 +1,10 @@ """File that contains the class which calculates statistics for a team/event/for other purposes.""" - +from functools import reduce from typing import Callable import numpy as np from numpy import percentile -from pandas import DataFrame, Series +from pandas import DataFrame, Series, isna from .constants import Criteria, Queries from .functions import scouting_data_for_team, retrieve_team_list, retrieve_pit_scouting_data @@ -29,7 +29,7 @@ def average_points_contributed(self, team_number: int) -> float: """ return self.points_contributed_by_match(team_number).mean() - def points_contributed_by_match(self, team_number: int, type_of_grid: str = "") -> Series: + def points_contributed_by_match(self, team_number: int, mode: str = "") -> Series: """Returns the points contributed by match for a team. The following custom graphs are supported with this function: @@ -38,98 +38,85 @@ def points_contributed_by_match(self, team_number: int, type_of_grid: str = "") - Multi line graph :param team_number: The team number to calculate the points contributed over the matches they played. - :param type_of_grid: Optional argument defining which mode to return the total points for (AutoGrid/TeleopGrid) + :param mode: Optional argument defining which mode to return the total points for (Auto/Teleop) :return: A Series containing the points contributed by said team per match. 
""" team_data = scouting_data_for_team(team_number, self.data) - auto_grid_points = team_data[Queries.AUTO_GRID].apply( - lambda grid_data: sum([ - Criteria.AUTO_GRID_POINTAGE[game_piece[1]] - for game_piece in grid_data.split("|") - if game_piece - ]) - ) - auto_mobility_points = team_data[Queries.LEFT_COMMUNITY].apply( - lambda left_community: Criteria.MOBILITY_CRITERIA[left_community] * 3 - ) - auto_charge_station_points = team_data[Queries.AUTO_CHARGING_STATE].apply( - lambda charging_state: Criteria.AUTO_CHARGE_POINTAGE.get(charging_state, 0) + # Autonomous calculations + auto_speaker_points = team_data[Queries.AUTO_SPEAKER].apply(lambda cycle: cycle * 5) + auto_amp_points = team_data[Queries.AUTO_AMP].apply(lambda cycle: cycle * 2) + auto_leave_points = team_data[Queries.LEFT_STARTING_ZONE].apply( + lambda left_starting_zone: Criteria.BOOLEAN_CRITERIA[left_starting_zone] ) + total_auto_points = auto_speaker_points + auto_amp_points + auto_leave_points - teleop_grid_points = team_data[Queries.TELEOP_GRID].apply( - lambda grid_data: sum([ - Criteria.TELEOP_GRID_POINTAGE[game_piece[1]] - for game_piece in grid_data.split("|") - if game_piece - ]) - ) + # Teleop calculations + teleop_speaker_points = team_data[Queries.TELEOP_SPEAKER].apply(lambda cycle: cycle * 2) + teleop_amp_points = team_data[Queries.TELEOP_AMP] + total_teleop_points = teleop_speaker_points + teleop_amp_points - endgame_points = team_data[Queries.ENDGAME_FINAL_CHARGE].apply( - lambda charging_state: Criteria.ENDGAME_POINTAGE.get(charging_state, 0) + # Endgame (stage) calculations + park_points = team_data[Queries.PARKED_UNDER_STAGE].apply( + lambda parking_state: Criteria.BOOLEAN_CRITERIA[parking_state] ) - - if type_of_grid == Queries.AUTO_GRID: - return auto_grid_points + auto_mobility_points + auto_charge_station_points - elif type_of_grid == Queries.TELEOP_GRID: - return teleop_grid_points - - return ( - auto_grid_points - + auto_mobility_points - + auto_charge_station_points - + teleop_grid_points - + endgame_points + climb_points = team_data[Queries.CLIMBED_CHAIN].apply( + lambda climbing_state: Criteria.BOOLEAN_CRITERIA[climbing_state] * 3 + ) + harmony_points = team_data[Queries.HARMONIZED_ON_CHAIN].apply( + lambda harmonized: Criteria.BOOLEAN_CRITERIA[harmonized] * 2 ) + trap_points = team_data[Queries.TELEOP_TRAP].apply(lambda cycle: cycle * 5) + total_endgame_points = park_points + climb_points + harmony_points + trap_points - def classify_autos_by_match(self, team_number: int) -> Series: - """Classifies each auto mode performed by a team. - As of now, this method only classifies grid placement (cable cover/charge station/loading zone). + if mode == Queries.AUTO: + return total_auto_points + elif mode == Queries.TELEOP: + return total_teleop_points - :return: A series containing grid placements indicating where the team started. 
- """ - team_data = scouting_data_for_team(team_number, self.data) - positions_to_placements = { - "1": Queries.LEFT, "2": Queries.LEFT, "3": Queries.RIGHT, - "4": Queries.COOP, "5": Queries.COOP, "6": Queries.COOP, - "7": Queries.RIGHT, "8": Queries.RIGHT, "9": Queries.RIGHT - } - - return team_data[Queries.AUTO_GRID].apply( - lambda grid_data: ( - positions_to_placements[grid_data[0]] - if grid_data - else Queries.LEFT - ) + return ( + total_auto_points + + total_teleop_points + + total_endgame_points ) # Cycle calculation methods - def average_cycles(self, team_number: int, type_of_grid: str) -> float: + def average_cycles(self, team_number: int, mode: str) -> float: """Calculates the average cycles for a team in either autonomous or teleop (wrapper around `cycles_by_match`). The following custom graphs are supported with this function: - Bar graph :param team_number: The team number to calculate the average cycles for. - :param type_of_grid: The mode to calculate said cycles for (AutoGrid/TeleopGrid) + :param mode: The mode to calculate said cycles for (Auto/Teleop) :return: A float representing the average cycles for said team in the mode specified. """ - return self.cycles_by_match(team_number, type_of_grid).mean() + return self.cycles_by_match(team_number, mode).mean() - def average_cycles_for_height(self, team_number: int, type_of_grid: str, height: str) -> float: - """Calculates the average cycles for a team in either autonomous or teleop (wrapper around `cycles_by_match`). + def average_cycles_for_structure(self, team_number: int, structure: str) -> float: + """Calculates the average cycles for a team for a structure (wrapper around `cycles_by_match`). The following custom graphs are supported with this function: - Bar graph :param team_number: The team number to calculate the average cycles for. - :param type_of_grid: The mode to calculate said cycles for (AutoGrid/TeleopGrid) - :param height: The height to return cycles by match for (H/M/L) - :return: A float representing the average cycles for said team in the mode specified. + :param structure: The structure to return cycles for (AutoSpeaker/AutoAmp/TeleopSpeaker/TeleopAmp/TeleopTrap) + :return: A float representing the average cycles for said team in the structure specified. + """ + return self.cycles_by_structure_per_match(team_number, structure).mean() + + def average_potential_amplification_periods(self, team_number: int) -> float: + """Returns the potential amplification periods a team is capable of by match. + + The following custom graphs are supported with this function: + - Bar graph + + The amplification periods that a team is capable of is decided by their auto + teleop amp cycles divided by two + :param team_number: The team to determine the potential amplification periods for. """ - return self.cycles_by_height_per_match(team_number, type_of_grid, height).mean() + return self.potential_amplification_periods_by_match(team_number).mean() - def cycles_by_match(self, team_number: int, type_of_grid: str) -> Series: + def cycles_by_match(self, team_number: int, mode: str) -> Series: """Returns the cycles for a certain mode (autonomous/teleop) in a match The following custom graphs are supported with this function: @@ -138,16 +125,18 @@ def cycles_by_match(self, team_number: int, type_of_grid: str) -> Series: - Multi line graph :param team_number: The team number to calculate the cycles by match for. 
- :param type_of_grid: The mode to return cycles by match for (AutoGrid/TeleopGrid) + :param mode: The mode to return cycles by match for (Auto/Teleop) :return: A series containing the cycles per match for the mode specified. """ team_data = scouting_data_for_team(team_number, self.data) - return team_data[type_of_grid].apply( - lambda grid_data: len(grid_data) if type(grid_data) is list else len(grid_data.split("|")) - ) - def cycles_by_height_per_match(self, team_number: int, type_of_grid: str, height: str) -> Series: - """Returns the cycles for a certain mode (autonomous/teleop) and height in a match + if mode == Queries.AUTO: + return team_data[Queries.AUTO_SPEAKER] + team_data[Queries.AUTO_AMP] + else: + return team_data[Queries.TELEOP_SPEAKER] + team_data[Queries.TELEOP_AMP] + team_data[Queries.TELEOP_TRAP] + + def cycles_by_structure_per_match(self, team_number: int, structure: str | tuple) -> Series: + """Returns the cycles for a certain structure (auto speaker, auto amp, etc.) in a match The following custom graphs are supported with this function: - Line graph @@ -155,97 +144,112 @@ def cycles_by_height_per_match(self, team_number: int, type_of_grid: str, height - Multi line graph :param team_number: The team number to calculate the cycles by height per match for. - :param type_of_grid: The mode to return cycles by match for (AutoGrid/TeleopGrid) - :param height: The height to return cycles by match for (H/M/L) - :return: A series containing the cycles per match for the mode specified. + :param structure: The structure to return cycles for (AutoSpeaker/AutoAmp/TeleopSpeaker/TeleopAmp/TeleopTrap) + :return: A series containing the cycles per match for the structure specified. """ team_data = scouting_data_for_team(team_number, self.data) - return team_data[type_of_grid].apply( - lambda grid_data: len([ - game_piece for game_piece in grid_data.split("|") - if game_piece and game_piece[1] == height - ]) - ) - def cycles_by_game_piece_per_match(self, team_number: int, type_of_grid: str, game_piece: str) -> Series: - """Returns the cycles for a certain game piece across matches. + if isinstance(structure, tuple): + return reduce(lambda x, y: x + y, [team_data[struct] for struct in structure]) + else: + return team_data[structure] + + def potential_amplification_periods_by_match(self, team_number: int) -> Series: + """Returns the potential amplification periods a team is capable of by match. The following custom graphs are supported with this function: - Line graph - Box plot - Multi line graph - :param team_number: The team number to calculate the cycles by game piece per match for. - :param type_of_grid: The type of mode to calculate the game piece cycles for (AutoGrid/TeleopGrid) - :param game_piece: The type of game piece to count cycles for (cone/cube) - :return: A series containing the cycles per match for the game piece specified. + The amplification periods that a team is capable of is decided by their auto + teleop amp cycles divided by two + :param team_number: The team to determine the potential amplification periods for. 
""" - team_data = scouting_data_for_team(team_number, self.data) - game_piece_positions = ( - {"1", "3", "4", "6", "7", "9"} - if game_piece == Queries.CONE - else {"2", "5", "8"} - ) + return self.cycles_by_structure_per_match(team_number, (Queries.AUTO_AMP, Queries.TELEOP_AMP)) // 2 - return team_data[type_of_grid].apply( - lambda grid_data: len([ - cycle for cycle in grid_data.split("|") - if cycle and ( - cycle[0] in game_piece_positions - or cycle[2:] == game_piece - ) - ]) - ) - - # Accuracy methods - def average_auto_accuracy(self, team_number: int) -> float: - """Returns the average auto accuracy of a team (wrapper around `auto_accuracy_by_match`). + # Alliance-wide methods + def average_coop_bonus_rate(self, team_number_one: int, team_number_two: int, team_number_three: int) -> float: + """Returns the average rate (%) that the coopertition bonus is reached by an alliance (average method). + (ignore) The following custom graphs are supported with this function: - Bar graph - :param team_number: The team to determine the average auto accuracy for. - :return: A float representing a percentage of the average auto accuracy of said team. + :param team_number_one: The first team within the alliance. + :param team_number_two: The second team within the alliance. + :param team_number_three: The third team within the alliance. + :return: A float representing the % rate of the alliance reaching the coopertition bonus. """ - return self.auto_accuracy_by_match(team_number).mean() + return self.reaches_coop_bonus_by_match(team_number_one, team_number_two, team_number_three).astype(int).mean() - def auto_accuracy_by_match(self, team_number: int) -> Series: - """Returns the auto accuracy of a team by match. + def reaches_coop_bonus_by_match(self, team_number_one: int, team_number_two: int, team_number_three: int) -> Series: + """Returns whether three teams within an alliance are able to reach the coopertition bonus within the first + 45 seconds of a match by match. (ignore) The following custom graphs are supported with this function: - Line graph - Box plot - Multi line graph - :param team_number: The team to determine the auto accuracy per match for. - :return: A series containing the auto accuracy by match for said team. + :param team_number_one: The first team within the alliance. + :param team_number_two: The second team within the alliance. + :param team_number_three: The third team within the alliance. + :return: Whether or not the alliance would reach the coopertition bonus requirement of one amp cycle in 45 sec. 
""" - auto_missed_by_match = self.stat_per_match( - team_number, - Queries.AUTO_MISSED - ) - auto_cycles_by_match = self.cycles_by_match( - team_number, - Queries.AUTO_GRID - ) + auto_missed_by_match # Adding auto missed in order to get an accurate % (2 scored + 1 missed = 33%) - return 1 - (auto_missed_by_match / auto_cycles_by_match) - + auto_amp_sufficient = ( + self.cycles_by_structure_per_match(team_number_one, Queries.AUTO_AMP) + + self.cycles_by_structure_per_match(team_number_two, Queries.AUTO_AMP) + + self.cycles_by_structure_per_match(team_number_three, Queries.AUTO_AMP) + ).apply(lambda total_auto_amp: total_auto_amp >= 1) + teleop_amp_sufficient = ( + self.cycles_by_structure_per_match(team_number_one, Queries.TELEOP_AMP) + + self.cycles_by_structure_per_match(team_number_two, Queries.TELEOP_AMP) + + self.cycles_by_structure_per_match(team_number_three, Queries.TELEOP_AMP) + ).apply(lambda total_teleop_amp: total_teleop_amp >= 1) # Should be able to put one down in the first 45 seconds, poor metric so should change later + + return auto_amp_sufficient | teleop_amp_sufficient + + # Rating methods def average_driver_rating(self, team_number: int) -> float: - """Returns the average driver rating of a team + """Returns the average driver rating of a team. :param team_number: The team to determine the driver rating for. :return: A float representing the average driver rating of said team. """ - return scouting_data_for_team(team_number, self.data)[Queries.DRIVER_RATING].mean() + return scouting_data_for_team(team_number, self.data)[Queries.DRIVER_RATING].apply( + lambda driver_rating: Criteria.DRIVER_RATING_CRITERIA.get(driver_rating, float("nan")) + ).mean() - def average_defense_rating(self, team_number: int) -> float: - """Returns the average defense rating of a team + def average_defense_time(self, team_number: int) -> float: + """Returns the average defense time of a team - :param team_number: The team to determine the defense rating for. - :return: A float representing the average defense rating of said team. + :param team_number: The team to determine the defense time for. + :return: A float representing the average defense time of said team. """ - return scouting_data_for_team(team_number, self.data)[Queries.DEFENSE_RATING].mean() - + return scouting_data_for_team(team_number, self.data)[Queries.DEFENSE_TIME].apply( + lambda defense_time: Criteria.DEFENSE_TIME_CRITERIA.get(defense_time, float("nan")) + ).mean() + + def average_defense_skill(self, team_number: int) -> float: + """Returns the average defense skill of a team. + + :param team_number: The team to determine the defense skill for. + :return: A float representing the average defense skill of said team. + """ + return scouting_data_for_team(team_number, self.data)[Queries.DEFENSE_SKILL].apply( + lambda defense_skill: Criteria.BASIC_RATING_CRITERIA.get(defense_skill, float("nan")) + ).mean() + + def average_counter_defense_skill(self, team_number: int) -> float: + """Returns the average counter defense skill (ability to swerve past defense) of a team. + + :param team_number: The team to determine the counter defense skill for. + :return: A float representing the average counter defense skill of said team. 
+ """ + return scouting_data_for_team(team_number, self.data)[Queries.COUNTER_DEFENSE_SKIll].apply( + lambda counter_defense_skill: Criteria.BASIC_RATING_CRITERIA.get(counter_defense_skill, float("nan")) + ).mean() + def disables_by_team(self, team_number: int) -> float: """Returns a series of data representing the teams disables @@ -341,12 +345,15 @@ def cartesian_product( ]) def driving_index(self, team_number: int) -> float: - """Determines how fast a team is based on multiplying their teleop cycles by their driver rating. + """Determines how fast a team is based on multiplying their teleop cycles by their counter defense rating - Used for custom graphs with three teams. - Used for custom graphs with a full event. :param team_number: The team number to calculate a driving index for. """ - team_data = scouting_data_for_team(team_number, self.data) - return self.cycles_by_match(team_number, Queries.TELEOP_GRID).mean() * team_data[Queries.DRIVER_RATING].mean() + counter_defense_skill = self.average_counter_defense_skill(team_number) + return ( + self.average_cycles(team_number, Queries.TELEOP) + * 0 if isna(counter_defense_skill) else counter_defense_skill + ) diff --git a/src/utils/constants.py b/src/utils/constants.py index 91deb25..afe1de5 100644 --- a/src/utils/constants.py +++ b/src/utils/constants.py @@ -43,7 +43,7 @@ class GeneralConstants: class EventSpecificConstants: """Constants specific to an event.""" - EVENT_CODE = "2023new" + EVENT_CODE = "2024vaash" URL = f"https://raw.githubusercontent.com/team4099/ScoutingAppData/main/{EVENT_CODE}_match_data.json" PIT_SCOUTING_URL = ( f"https://raw.githubusercontent.com/team4099/ScoutingAppData/main/{EVENT_CODE}_pit_scouting_data.csv" @@ -65,38 +65,32 @@ class Queries: MATCH_NUMBER = "MatchNumber" TEAM_NUMBER = "TeamNumber" - AUTO_GRID = "AutoGrid" - AUTO_MISSED = "AutoMissed" - LEFT_COMMUNITY = "Mobile" - AUTO_ENGAGE_ATTEMPTED = "AutoAttemptedCharge" - AUTO_CHARGING_STATE = "AutoChargingState" - AUTO_CONES = "AutoCones" - AUTO_CUBES = "AutoCubes" + AUTO_SPEAKER = "AutoSpeaker" + AUTO_AMP = "AutoAmp" + LEFT_STARTING_ZONE = "AutoLeave" - TELEOP_GRID = "TeleopGrid" - ENDGAME_FINAL_CHARGE = "EndgameFinalCharge" + TELEOP_SPEAKER = "TeleopSpeaker" + TELEOP_AMP = "TeleopAmp" + TELEOP_TRAP = "TeleopTrap" - DRIVER_RATING = "DriverRating" - DEFENSE_RATING = "DefenseRating" - DISABLE = "Disable" - - # Constants for different heights - LOW = "L" - MID = "M" - HIGH = "H" + PARKED_UNDER_STAGE = "Parked" + CLIMBED_CHAIN = "ClimbStatus" + HARMONIZED_ON_CHAIN = "Harmonized" + CLIMB_SPEED = "Slow/Fast" - # Constants for different game pieces - CONE = "cone" - CUBE = "cube" + DRIVER_RATING = "DriverRating" + DEFENSE_TIME = "DefenseTime" + DEFENSE_SKILL = "DefenseSkill" + COUNTER_DEFENSE_SKIll = "CounterDefenseSkill" + DISABLE = "Disabled" # Alliance constants RED_ALLIANCE = "red" BLUE_ALLIANCE = "blue" - # Grid placements - LEFT = "left" - COOP = "coop" - RIGHT = "right" + # Modes + AUTO = "Auto" + TELEOP = "Teleop" # Custom graph keywords ONE_TEAM_KEYWORD = "Used for custom graphs with one team." 
@@ -108,41 +102,41 @@ class Criteria: """Criteria used in `CalculatedStats`.""" # Autonomous criteria - AUTO_GRID_POINTAGE = { - Queries.LOW: 3, - Queries.MID: 4, - Queries.HIGH: 6 - } - MOBILITY_CRITERIA = { + BOOLEAN_CRITERIA = { 0: 0, "false": 0, 1: 1, - "true": 1 - } - AUTO_CHARGE_POINTAGE = { - "Dock": 8, - "Engage": 12 - } - AUTO_ATTEMPT_CRITERIA = { - "Engage": 1 - } - SUCCESSFUL_ENGAGE_CRITERIA = { - "Engage": 1 - } - SUCCESSFUL_DOCK_CRITERIA = { - "Dock": 1 - } - - # Teleop Criteria - TELEOP_GRID_POINTAGE = { - Queries.LOW: 2, - Queries.MID: 3, - Queries.HIGH: 5 + "true": 1, + False: 0, + True: 1 } # Endgame Criteria - ENDGAME_POINTAGE = { + CLIMBING_POINTAGE = { "Park": 2, "Dock": 6, "Engage": 10 } + + # Ratings criteria + DRIVER_RATING_CRITERIA = { + "Very Fluid": 5, + "Fluid": 4, + "Average": 3, + "Poor": 2, + "Very Poor": 1 + } + DEFENSE_TIME_CRITERIA = { + "Very Often": 5, + "Often": 4, + "Sometimes": 3, + "Rarely": 2, + "Never": 1 + } + BASIC_RATING_CRITERIA = { + "Very Good": 5, + "Good": 4, + "Okay": 3, + "Poor": 2, + "Very Poor": 1 + } \ No newline at end of file diff --git a/src/utils/functions.py b/src/utils/functions.py index a1f8061..b0023d9 100644 --- a/src/utils/functions.py +++ b/src/utils/functions.py @@ -1,5 +1,6 @@ """Defines utility functions that are later used in FalconVis.""" from io import StringIO +from json import load from re import search from typing import Any @@ -83,16 +84,20 @@ def retrieve_match_schedule() -> DataFrame: key=lambda match_info: (match_levels_to_order[match_info["comp_level"]], match_info["match_number"]) ) - return DataFrame.from_dict( - [ - { - "match_key": match["key"].replace(f"{EventSpecificConstants.EVENT_CODE}_", ""), - "red_alliance": [int(team[3:]) for team in match["alliances"]["red"]["team_keys"]], - "blue_alliance": [int(team[3:]) for team in match["alliances"]["blue"]["team_keys"]] - } - for match in event_matches - ] - ) + if event_matches: + return DataFrame.from_dict( + [ + { + "match_key": match["key"].replace(f"{EventSpecificConstants.EVENT_CODE}_", ""), + "red_alliance": [int(team[3:]) for team in match["alliances"]["red"]["team_keys"]], + "blue_alliance": [int(team[3:]) for team in match["alliances"]["blue"]["team_keys"]] + } + for match in event_matches + ] + ) + else: # Load match schedule from local files + with open("src/data/match_schedule.json") as file: + return DataFrame.from_dict(load(file)) def scouting_data_for_team(team_number: int, scouting_data: DataFrame | None = None) -> DataFrame: From cb2e812a727cc148a08aa0b73100c9d9736be32a Mon Sep 17 00:00:00 2001 From: Shom770 Date: Wed, 7 Feb 2024 21:43:33 -0500 Subject: [PATCH 02/19] finished auto graphs --- src/page_managers/match_manager.py | 135 ++++++++++++++++++++++------- 1 file changed, 106 insertions(+), 29 deletions(-) diff --git a/src/page_managers/match_manager.py b/src/page_managers/match_manager.py index 8287fc6..c173841 100644 --- a/src/page_managers/match_manager.py +++ b/src/page_managers/match_manager.py @@ -58,12 +58,12 @@ def generate_input_section(self) -> list[list, list]: # Filter through matches where the selected team plays in. 
match_schedule = match_schedule[ match_schedule["red_alliance"] - .apply(lambda alliance: ",".join(map(str, alliance))) - .str.contains(filter_by_team_number) + .apply(lambda alliance: ",".join(map(str, alliance))) + .str.contains(filter_by_team_number) | match_schedule["blue_alliance"] - .apply(lambda alliance: ",".join(map(str, alliance))) - .str.contains(filter_by_team_number) - ] + .apply(lambda alliance: ",".join(map(str, alliance))) + .str.contains(filter_by_team_number) + ] match_chosen = match_selector_col.selectbox( "Choose Match", match_schedule["match_key"] @@ -128,7 +128,7 @@ def generate_hypothetical_input_section(self) -> list[list, list]: ] def generate_match_prediction_dashboard( - self, red_alliance: list[int], blue_alliance: list[int] + self, red_alliance: list[int], blue_alliance: list[int] ) -> None: """Generates metrics for match predictions (Red vs. Blue Tab). @@ -152,13 +152,13 @@ def generate_match_prediction_dashboard( # Calculate mean and standard deviation of the point distribution of the red alliance. red_alliance_std = ( - sum( - [ - np.std(team_distribution) ** 2 - for team_distribution in red_alliance_points - ] - ) - ** 0.5 + sum( + [ + np.std(team_distribution) ** 2 + for team_distribution in red_alliance_points + ] + ) + ** 0.5 ) red_alliance_mean = sum( [ @@ -169,13 +169,13 @@ def generate_match_prediction_dashboard( # Calculate mean and standard deviation of the point distribution of the blue alliance. blue_alliance_std = ( - sum( - [ - np.std(team_distribution) ** 2 - for team_distribution in blue_alliance_points - ] - ) - ** 0.5 + sum( + [ + np.std(team_distribution) ** 2 + for team_distribution in blue_alliance_points + ] + ) + ** 0.5 ) blue_alliance_mean = sum( [ @@ -185,7 +185,7 @@ def generate_match_prediction_dashboard( ) # Calculate mean and standard deviation of the point distribution of red alliance - blue alliance - compared_std = (red_alliance_std**2 + blue_alliance_std**2) ** 0.5 + compared_std = (red_alliance_std ** 2 + blue_alliance_std ** 2) ** 0.5 compared_mean = red_alliance_mean - blue_alliance_mean # Use sentinel value if there isn't enough of a distribution yet to determine standard deviation. @@ -293,7 +293,7 @@ def generate_match_prediction_dashboard( ) def generate_match_prediction_graphs( - self, red_alliance: list[int], blue_alliance: list[int], type_of_graph: str + self, red_alliance: list[int], blue_alliance: list[int], type_of_graph: str ) -> None: """Generate graphs for match prediction (Red vs. Blue tab). @@ -354,10 +354,10 @@ def generate_alliance_dashboard(self, team_numbers: list[int], color_gradient: l ) def generate_autonomous_graphs( - self, - team_numbers: list[int], - type_of_graph: str, - color_gradient: list[str] + self, + team_numbers: list[int], + type_of_graph: str, + color_gradient: list[str] ) -> None: """Generates the autonomous graphs for the `Match` page. 
@@ -368,7 +368,86 @@ def generate_autonomous_graphs( """ display_cycle_contributions = type_of_graph == GraphType.CYCLE_CONTRIBUTIONS - # TODO: add auton graphs + best_auto_config_col, auto_cycles_breakdown_col = st.columns(2, gap="large") + + # Best auto configuration graph + with best_auto_config_col: + if display_cycle_contributions: + best_autos_by_team = sorted( + [ + (team_number, self.calculated_stats.cycles_by_match(team_number, Queries.AUTO).max()) + for team_number in team_numbers + ], + key=lambda pair: pair[1], + reverse=True + ) + else: + best_autos_by_team = sorted( + [ + ( + team_number, self.calculated_stats.points_contributed_by_match(team_number, Queries.AUTO).max()) + for team_number in team_numbers + ], + key=lambda pair: pair[1], + reverse=True + ) + + plotly_chart( + bar_graph( + [pair[0] for pair in best_autos_by_team], + [pair[1] for pair in best_autos_by_team], + x_axis_label="Teams", + y_axis_label=( + "# of Cycles in Auto" + if display_cycle_contributions + else "# of Points in Auto" + ), + title="Best Auto Configuration", + color=color_gradient[1] + ) + ) + + # Auto cycle breakdown graph + with auto_cycles_breakdown_col: + if display_cycle_contributions: + average_speaker_cycles_by_team = [ + self.calculated_stats.average_cycles_for_structure(team, Queries.AUTO_SPEAKER) + for team in team_numbers + ] + average_amp_cycles_by_team = [ + self.calculated_stats.average_cycles_for_structure(team, Queries.AUTO_AMP) + for team in team_numbers + ] + else: + average_speaker_cycles_by_team = [ + self.calculated_stats.average_cycles_for_structure(team, Queries.AUTO_SPEAKER) * 5 + for team in team_numbers + ] + average_amp_cycles_by_team = [ + self.calculated_stats.average_cycles_for_structure(team, Queries.AUTO_AMP) * 2 + for team in team_numbers + ] + + plotly_chart( + stacked_bar_graph( + team_numbers, + [average_speaker_cycles_by_team, average_amp_cycles_by_team], + "Teams", + [ + ("Avg. Speaker Cycles" if display_cycle_contributions else "Avg. Speaker Points"), + ("Avg. Amp Cycles" if display_cycle_contributions else "Avg. Amp Points") + ], + ("Total Auto Cycles" if display_cycle_contributions else "Total Auto Points"), + title="Auto Scoring Breakdown", + color_map={ + ("Avg. Speaker Cycles" if display_cycle_contributions else "Avg. Speaker Points"): color_gradient[1], + ("Avg. Amp Cycles" if display_cycle_contributions else "Avg. 
Amp Points"): color_gradient[2] + } + ).update_layout(xaxis={"categoryorder": "total descending"}) + ) + + + def generate_teleop_graphs( self, @@ -384,5 +463,3 @@ def generate_teleop_graphs( :return: """ display_cycle_contributions = type_of_graph == GraphType.CYCLE_CONTRIBUTIONS - - # TODO: add teleop graphs \ No newline at end of file From 17e3925c8d77632586658e449bc5ef3bca002e8f Mon Sep 17 00:00:00 2001 From: AJaiman Date: Thu, 8 Feb 2024 18:50:54 -0500 Subject: [PATCH 03/19] Fixed bugs and made qualitative tab --- src/page_managers/match_manager.py | 66 +++++++++++++++++++++++++++++- src/pages/1_Match.py | 20 +++++++-- src/pages/2_Hypothetical_Match.py | 20 +++++++-- src/utils/calculated_stats.py | 4 +- 4 files changed, 98 insertions(+), 12 deletions(-) diff --git a/src/page_managers/match_manager.py b/src/page_managers/match_manager.py index c173841..dcde843 100644 --- a/src/page_managers/match_manager.py +++ b/src/page_managers/match_manager.py @@ -447,8 +447,6 @@ def generate_autonomous_graphs( ) - - def generate_teleop_graphs( self, team_numbers: list[int], @@ -463,3 +461,67 @@ def generate_teleop_graphs( :return: """ display_cycle_contributions = type_of_graph == GraphType.CYCLE_CONTRIBUTIONS + + def generate_qualitative_graphs( + self, + team_numbers: list[int], + color_gradient: list[str] + ): + """Generates the qualitative graphs for the `Match` page. + + :param team_numbers: The teams to generate the graphs for. + :param color_gradient: The color gradient to use for graphs, depending on the alliance. + :return: + """ + driver_rating_by_team_col, defense_rating_by_team_col, disables_by_team_col = st.columns(3) + + with driver_rating_by_team_col: + driver_rating_by_team = [ + self.calculated_stats.average_driver_rating(team) + for team in team_numbers + ] + + plotly_chart( + bar_graph( + team_numbers, + driver_rating_by_team, + x_axis_label="Teams", + y_axis_label="Driver Rating (1-5)", + title="Average Driver Rating by Team", + color=color_gradient[0] + ) + ) + + with defense_rating_by_team_col: + defense_rating_by_team = [ + self.calculated_stats.average_defense_skill(team) + for team in team_numbers + ] + + plotly_chart( + bar_graph( + team_numbers, + defense_rating_by_team, + x_axis_label="Teams", + y_axis_label="Defense Rating (1-5)", + title="Average Defense Rating by Team", + color=color_gradient[1] + ) + ) + + with disables_by_team_col: + disables_by_team = [ + self.calculated_stats.disables_by_match(team).sum() + for team in team_numbers + ] + + plotly_chart( + bar_graph( + team_numbers, + disables_by_team, + x_axis_label="Teams", + y_axis_label="Disables", + title="Disables by Team", + color=color_gradient[2] + ) + ) diff --git a/src/pages/1_Match.py b/src/pages/1_Match.py index a806107..0adb642 100644 --- a/src/pages/1_Match.py +++ b/src/pages/1_Match.py @@ -59,8 +59,8 @@ color_gradient=GeneralConstants.RED_ALLIANCE_GRADIENT ) - red_auto_tab, red_teleop_tab = st.tabs( - ["🤖 Autonomous", "🎮 Teleop"] + red_auto_tab, red_teleop_tab, red_qualitative_tab = st.tabs( + ["🤖 Autonomous", "🎮 Teleop", "📝 Qualitative"] ) with red_auto_tab: @@ -100,6 +100,12 @@ type_of_graph=GraphType.POINT_CONTRIBUTIONS, color_gradient=GeneralConstants.RED_ALLIANCE_GRADIENT ) + + with red_qualitative_tab: + match_manager.generate_qualitative_graphs( + teams_selected[0], + color_gradient=GeneralConstants.RED_ALLIANCE_GRADIENT + ) with blue_alliance_tab: st.write("### :blue[Blue] Alliance Graphs") @@ -110,8 +116,8 @@ color_gradient=GeneralConstants.BLUE_ALLIANCE_GRADIENT ) - blue_auto_tab, 
blue_teleop_tab = st.tabs( - ["🤖 Autonomous", "🎮 Teleop"] + blue_auto_tab, blue_teleop_tab, blue_qualitative_tab = st.tabs( + ["🤖 Autonomous", "🎮 Teleop", "📝 Qualitative"] ) with blue_auto_tab: @@ -151,3 +157,9 @@ type_of_graph=GraphType.POINT_CONTRIBUTIONS, color_gradient=GeneralConstants.BLUE_ALLIANCE_GRADIENT ) + + with blue_qualitative_tab: + match_manager.generate_qualitative_graphs( + teams_selected[1], + color_gradient=GeneralConstants.BLUE_ALLIANCE_GRADIENT + ) diff --git a/src/pages/2_Hypothetical_Match.py b/src/pages/2_Hypothetical_Match.py index 5eca9f4..1584e39 100644 --- a/src/pages/2_Hypothetical_Match.py +++ b/src/pages/2_Hypothetical_Match.py @@ -59,8 +59,8 @@ color_gradient=GeneralConstants.RED_ALLIANCE_GRADIENT ) - red_auto_tab, red_teleop_tab = st.tabs( - ["🤖 Autonomous", "🎮 Teleop"] + red_auto_tab, red_teleop_tab, red_qualitative_tab = st.tabs( + ["🤖 Autonomous", "🎮 Teleop", "📝 Qualitative"] ) with red_auto_tab: @@ -100,6 +100,12 @@ type_of_graph=GraphType.POINT_CONTRIBUTIONS, color_gradient=GeneralConstants.RED_ALLIANCE_GRADIENT ) + + with red_qualitative_tab: + match_manager.generate_qualitative_graphs( + teams_selected[0], + color_gradient=GeneralConstants.RED_ALLIANCE_GRADIENT + ) with blue_alliance_tab: st.write("### :blue[Blue] Alliance Graphs") @@ -110,8 +116,8 @@ color_gradient=GeneralConstants.BLUE_ALLIANCE_GRADIENT ) - blue_auto_tab, blue_teleop_tab = st.tabs( - ["🤖 Autonomous", "🎮 Teleop"] + blue_auto_tab, blue_teleop_tab, blue_qualitative_tab = st.tabs( + ["🤖 Autonomous", "🎮 Teleop", "📝 Qualitative"] ) with blue_auto_tab: @@ -151,3 +157,9 @@ type_of_graph=GraphType.POINT_CONTRIBUTIONS, color_gradient=GeneralConstants.BLUE_ALLIANCE_GRADIENT ) + + with blue_qualitative_tab: + match_manager.generate_qualitative_graphs( + teams_selected[1], + color_gradient=GeneralConstants.BLUE_ALLIANCE_GRADIENT + ) diff --git a/src/utils/calculated_stats.py b/src/utils/calculated_stats.py index 0e8d882..76963c1 100644 --- a/src/utils/calculated_stats.py +++ b/src/utils/calculated_stats.py @@ -250,8 +250,8 @@ def average_counter_defense_skill(self, team_number: int) -> float: lambda counter_defense_skill: Criteria.BASIC_RATING_CRITERIA.get(counter_defense_skill, float("nan")) ).mean() - def disables_by_team(self, team_number: int) -> float: - """Returns a series of data representing the teams disables + def disables_by_match(self, team_number: int) -> float: + """Returns a series of data representing the team's disables :param team_number: The team to find disable data for. :return: A series with the teams disable data. From 850a8e62d62faf2d2e16286ccc2017ba43797468 Mon Sep 17 00:00:00 2001 From: AJaiman Date: Thu, 8 Feb 2024 20:02:47 -0500 Subject: [PATCH 04/19] Added climbbreakdown --- src/page_managers/match_manager.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/src/page_managers/match_manager.py b/src/page_managers/match_manager.py index dcde843..867e58d 100644 --- a/src/page_managers/match_manager.py +++ b/src/page_managers/match_manager.py @@ -460,8 +460,32 @@ def generate_teleop_graphs( :param color_gradient: The color gradient to use for graphs, depending on the alliance. 
:return: """ + teams_data = [scouting_data_for_team(team) for team in team_numbers] display_cycle_contributions = type_of_graph == GraphType.CYCLE_CONTRIBUTIONS + climb_breakdown_by_team_col, climb_speed_by_team = st.columns(2) + + with climb_breakdown_by_team_col: + normal_climbs_by_team = [ + team_data[Queries.HARMONIZED_ON_CHAIN].sum() + for team_data in teams_data + ] + harmonized_climbs_by_team = [ + team_data[Queries.CLIMBED_CHAIN].sum() - harmonized_climbs #This works but it shouldn't I think we have harmonized climbs and normal climbs reversed + for team_data, harmonized_climbs in zip(teams_data, normal_climbs_by_team) + ] + + plotly_chart( + stacked_bar_graph( + team_numbers, + [normal_climbs_by_team, harmonized_climbs_by_team], + x_axis_label="Teams", + y_axis_label= ["Normal Climbs", "Harmonized Climbs"], + title="Climbs by Team", + color_map={"Normal Climbs": color_gradient[1], "Harmonized Climbs": color_gradient[2]} + ) + ) + def generate_qualitative_graphs( self, team_numbers: list[int], From 92d60ffc8df33b3cb365bbcfb0c89aaa4692f44f Mon Sep 17 00:00:00 2001 From: AJaiman Date: Thu, 8 Feb 2024 20:43:36 -0500 Subject: [PATCH 05/19] Fast and slow climb graphs added --- src/page_managers/match_manager.py | 24 ++++++++++++++++++++++++ src/utils/constants.py | 2 +- 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/src/page_managers/match_manager.py b/src/page_managers/match_manager.py index 867e58d..b4bacbe 100644 --- a/src/page_managers/match_manager.py +++ b/src/page_managers/match_manager.py @@ -481,11 +481,35 @@ def generate_teleop_graphs( [normal_climbs_by_team, harmonized_climbs_by_team], x_axis_label="Teams", y_axis_label= ["Normal Climbs", "Harmonized Climbs"], + y_axis_title="# of Climb Types", title="Climbs by Team", color_map={"Normal Climbs": color_gradient[1], "Harmonized Climbs": color_gradient[2]} ) ) + with climb_speed_by_team: + slow_climbs = [ + (team_data[Queries.CLIMB_SPEED] == "Slow").sum() + for team_data in teams_data + ] + + fast_climbs = [ + (team_data[Queries.CLIMB_SPEED] == "Fast").sum() + for team_data in teams_data + ] + + plotly_chart( + stacked_bar_graph( + team_numbers, + [slow_climbs, fast_climbs], + x_axis_label="Teams", + y_axis_label= ["Slow Climbs", "Fast Climbs"], + y_axis_title="# of Climb Speeds", + title="Climb Speeds by Team", + color_map={"Slow Climbs": color_gradient[1], "Fast Climbs": color_gradient[2]} + ) + ) + def generate_qualitative_graphs( self, team_numbers: list[int], diff --git a/src/utils/constants.py b/src/utils/constants.py index afe1de5..48248a5 100644 --- a/src/utils/constants.py +++ b/src/utils/constants.py @@ -76,7 +76,7 @@ class Queries: PARKED_UNDER_STAGE = "Parked" CLIMBED_CHAIN = "ClimbStatus" HARMONIZED_ON_CHAIN = "Harmonized" - CLIMB_SPEED = "Slow/Fast" + CLIMB_SPEED = "ClimbSpeed" DRIVER_RATING = "DriverRating" DEFENSE_TIME = "DefenseTime" From 13c343d9720e6f8fb8a68b3fb37da74fdc8be769 Mon Sep 17 00:00:00 2001 From: Shom770 Date: Thu, 8 Feb 2024 21:51:29 -0500 Subject: [PATCH 06/19] finished colored metric stuff --- src/page_managers/team_manager.py | 27 ++++-- src/utils/components/__init__.py | 1 + .../colored_metric_with_two_values.py | 89 +++++++++++++++++++ ...ored_metric_with_two_values_component.html | 26 ++++++ 4 files changed, 136 insertions(+), 7 deletions(-) create mode 100644 src/utils/components/colored_metric_with_two_values.py create mode 100644 src/utils/components/colored_metric_with_two_values_component.html diff --git a/src/page_managers/team_manager.py 
b/src/page_managers/team_manager.py index 9fe9da8..ca64c39 100644 --- a/src/page_managers/team_manager.py +++ b/src/page_managers/team_manager.py @@ -9,6 +9,7 @@ box_plot, CalculatedStats, colored_metric, + colored_metric_with_two_values, Criteria, GeneralConstants, GraphType, @@ -85,18 +86,30 @@ def generate_metrics(self, team_number: int) -> None: # Metric for average auto cycles with auto_cycle_col: - average_auto_cycles = self.calculated_stats.average_cycles( + average_auto_speaker_cycles = self.calculated_stats.average_cycles_for_structure( team_number, - Queries.AUTO + Queries.AUTO_SPEAKER ) - auto_cycles_for_percentile = self.calculated_stats.quantile_stat( + average_auto_amp_cycles = self.calculated_stats.average_cycles_for_structure( + team_number, + Queries.AUTO_AMP + ) + average_auto_speaker_cycles_for_percentile = self.calculated_stats.quantile_stat( 0.5, - lambda self, team: self.average_cycles(team, Queries.AUTO) + lambda self, team: self.average_cycles_for_structure(team, Queries.AUTO_SPEAKER) ) - colored_metric( + average_auto_amp_cycles_for_percentile = self.calculated_stats.quantile_stat( + 0.5, + lambda self, team: self.average_cycles_for_structure(team, Queries.AUTO_AMP) + ) + + colored_metric_with_two_values( "Average Auto Cycles", - round(average_auto_cycles, 2), - threshold=auto_cycles_for_percentile + "Speaker / Amp", + round(average_auto_speaker_cycles, 2), + round(average_auto_amp_cycles, 2), + first_threshold=average_auto_speaker_cycles_for_percentile, + second_threshold=average_auto_amp_cycles_for_percentile ) # Metric for average teleop cycles diff --git a/src/utils/components/__init__.py b/src/utils/components/__init__.py index 51a4e4d..32ef3d3 100644 --- a/src/utils/components/__init__.py +++ b/src/utils/components/__init__.py @@ -1,3 +1,4 @@ from .alliance_breakdown import * from .colored_metric import * +from .colored_metric_with_two_values import * from .win_percentages import * diff --git a/src/utils/components/colored_metric_with_two_values.py b/src/utils/components/colored_metric_with_two_values.py new file mode 100644 index 0000000..241adbf --- /dev/null +++ b/src/utils/components/colored_metric_with_two_values.py @@ -0,0 +1,89 @@ +"""Creates a component to display colored metrics.""" + +from typing import Any, Callable +from streamlit.components.v1 import html + +__all__ = ["colored_metric_with_two_values"] + + +def colored_metric_with_two_values( + metric_title: str, + metric_subtitle: str, + metric_first_value: Any, + metric_second_value: Any, + *, + height: int = 130, + background_color: str = "#OE1117", + opacity: float = 1.0, + first_threshold: float | None = None, + second_threshold: float | None = None, + invert_threshold: bool = False, + value_formatter: Callable = None, + border_color: str | None = None, + border_opacity: float | None = None, + create_ring: bool = False, + ring_color: str = "#262730" +) -> None: + """Creates a card similar to st.metric that can be colored/customized. + + :param metric_title: The title for the colored metric. + :param metric_subtitle: The subtitle for the colored metric. + :param metric_first_value: The first value for the colored metric. + :param metric_second_value: The second value for the colored metric. + :param height: A number representing the height of the metric, in pixels. If not specified, the height is automatically found. + :param background_color: A hex code representing the background color of the metric. + :param opacity: The opacity of the metric if a background color exists. 
+ :param first_threshold: If a threshold exists, change the background color to denote whether the metric "passes" a threshold. + :param second_threshold: If a threshold exists, change the background color to denote whether the metric "passes" a threshold (same as first_threshold but for a diff value). + :param invert_threshold: Determines whether or not to invert the threshold (greater than threshold = red) + :param value_formatter: Optional argument that formats the metric value passed in. + :param border_color: A hex code representing the color of the border attached to the metric. + :param border_opacity: The opacity of the border if it exists. + :param create_ring: A boolean representing whether a ring should be created around the metric. + :param ring_color: A hex code representing the color of the ring if it exists. + :return: + """ + # Set background color based on threshold + if first_threshold is not None and second_threshold is not None: + if ( + ((metric_first_value >= first_threshold or metric_second_value >= second_threshold) and not invert_threshold) + or ((metric_first_value <= first_threshold or metric_first_value <= second_threshold) and invert_threshold) + ): + background_color = "#052e16" + opacity = 0.5 + + if ( + ((metric_first_value < first_threshold or metric_second_value < second_threshold) and not invert_threshold) + or ((metric_first_value > first_threshold or metric_second_value > second_threshold) and invert_threshold) + ): + background_color = "#450a0a" + opacity = 0.5 + + # Style card to use background color if border color isn't defined + if border_color is None: + border_color = background_color + border_opacity = (1 if border_opacity is None else border_opacity) + + with open("./src/utils/components/colored_metric_with_two_values_component.html") as html_file: + html_template = html_file.read().format( + metric_title=metric_title, + metric_subtitle=metric_subtitle, + metric_first_value=(str(metric_first_value) if value_formatter is None else value_formatter(metric_first_value)), + metric_second_value=(str(metric_second_value) if value_formatter is None else value_formatter(metric_second_value)), + height=f"[{height}px]", + background_color=background_color, + opacity=str(opacity), + border_color=border_color, + border_opacity=border_opacity, + ring=( + "" + if not create_ring + else f"ring ring-[{ring_color}] ring-offset-2" + ) + ) + + html( + html_template, + height=height + ) + diff --git a/src/utils/components/colored_metric_with_two_values_component.html b/src/utils/components/colored_metric_with_two_values_component.html new file mode 100644 index 0000000..0bf853a --- /dev/null +++ b/src/utils/components/colored_metric_with_two_values_component.html @@ -0,0 +1,26 @@ + + + + +
+ <!-- Tailwind-styled card markup (26 lines) not reproduced here: it lays out {metric_title}, {metric_subtitle}, and {metric_first_value} / {metric_second_value}, and is styled via the {height}, {background_color}, {opacity}, {border_color}, {border_opacity}, and {ring} placeholders filled in by colored_metric_with_two_values.py -->
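A hedged usage sketch of the new two-value metric component outside of `team_manager.py`; the import path, stat values, and thresholds below are assumptions for illustration and are not part of the patch:

# Illustrative only: renders a speaker/amp card that is colored green only when both
# values meet their (assumed) event-median thresholds; otherwise the wrapper shows red.
from utils.components import colored_metric_with_two_values  # import path assumed

colored_metric_with_two_values(
    "Average Teleop Cycles",
    "Speaker / Amp",
    7.4,   # example speaker cycles
    2.1,   # example amp cycles
    first_threshold=5.0,   # example 50th-percentile values
    second_threshold=1.5,
    value_formatter=lambda value: f"{value:.1f}"
)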
+ From 213d4f08f90a220d75a20a3172254e4dfd73f2ac Mon Sep 17 00:00:00 2001 From: Shom770 Date: Fri, 9 Feb 2024 09:19:24 -0500 Subject: [PATCH 07/19] create coop metric --- src/page_managers/match_manager.py | 18 ++++++++++++++---- src/utils/calculated_stats.py | 26 +++++++++----------------- 2 files changed, 23 insertions(+), 21 deletions(-) diff --git a/src/page_managers/match_manager.py b/src/page_managers/match_manager.py index c173841..de376fe 100644 --- a/src/page_managers/match_manager.py +++ b/src/page_managers/match_manager.py @@ -313,7 +313,7 @@ def generate_alliance_dashboard(self, team_numbers: list[int], color_gradient: l :param color_gradient: The color gradient to use for graphs, depending on the alliance. :return: """ - fastest_cycler_col, second_fastest_cycler_col, slowest_cycler_col = st.columns(3) + fastest_cycler_col, second_fastest_cycler_col, slowest_cycler_col, reaches_coop_col = st.columns(4) fastest_cyclers = sorted( { @@ -353,6 +353,19 @@ def generate_alliance_dashboard(self, team_numbers: list[int], color_gradient: l border_opacity=0.9 ) + # Colored metric displaying the chance of reaching the co-op bonus (1 amp cycle in 45 seconds + auto) + with reaches_coop_col: + coop_by_match = [self.calculated_stats.reaches_coop_bonus_by_match(team) for team in team_numbers] + possible_coop_combos = self.calculated_stats.cartesian_product(*coop_by_match) + + colored_metric( + "Chance of Co-Op Bonus", + f"{len([combo for combo in possible_coop_combos if any(combo)]) / len(possible_coop_combos):.0%}", + background_color=color_gradient[3], + opacity=0.4, + border_opacity=0.9 + ) + def generate_autonomous_graphs( self, team_numbers: list[int], @@ -446,9 +459,6 @@ def generate_autonomous_graphs( ).update_layout(xaxis={"categoryorder": "total descending"}) ) - - - def generate_teleop_graphs( self, team_numbers: list[int], diff --git a/src/utils/calculated_stats.py b/src/utils/calculated_stats.py index 0e8d882..2aa76ff 100644 --- a/src/utils/calculated_stats.py +++ b/src/utils/calculated_stats.py @@ -168,21 +168,19 @@ def potential_amplification_periods_by_match(self, team_number: int) -> Series: return self.cycles_by_structure_per_match(team_number, (Queries.AUTO_AMP, Queries.TELEOP_AMP)) // 2 # Alliance-wide methods - def average_coop_bonus_rate(self, team_number_one: int, team_number_two: int, team_number_three: int) -> float: + def average_coop_bonus_rate(self, team_number: int) -> float: """Returns the average rate (%) that the coopertition bonus is reached by an alliance (average method). (ignore) The following custom graphs are supported with this function: - Bar graph - :param team_number_one: The first team within the alliance. - :param team_number_two: The second team within the alliance. - :param team_number_three: The third team within the alliance. + :param team_number: The team to calculate the average coop bonus rate for. :return: A float representing the % rate of the alliance reaching the coopertition bonus. """ - return self.reaches_coop_bonus_by_match(team_number_one, team_number_two, team_number_three).astype(int).mean() + return self.reaches_coop_bonus_by_match(team_number).astype(int).mean() - def reaches_coop_bonus_by_match(self, team_number_one: int, team_number_two: int, team_number_three: int) -> Series: + def reaches_coop_bonus_by_match(self, team_number: int) -> Series: """Returns whether three teams within an alliance are able to reach the coopertition bonus within the first 45 seconds of a match by match. 
(ignore) @@ -191,21 +189,15 @@ def reaches_coop_bonus_by_match(self, team_number_one: int, team_number_two: int - Box plot - Multi line graph - :param team_number_one: The first team within the alliance. - :param team_number_two: The second team within the alliance. - :param team_number_three: The third team within the alliance. + :param team_number: The team to determine the coop bonus rate by match for. :return: Whether or not the alliance would reach the coopertition bonus requirement of one amp cycle in 45 sec. """ - auto_amp_sufficient = ( - self.cycles_by_structure_per_match(team_number_one, Queries.AUTO_AMP) - + self.cycles_by_structure_per_match(team_number_two, Queries.AUTO_AMP) - + self.cycles_by_structure_per_match(team_number_three, Queries.AUTO_AMP) + auto_amp_sufficient = self.cycles_by_structure_per_match( + team_number, Queries.AUTO_AMP ).apply(lambda total_auto_amp: total_auto_amp >= 1) teleop_amp_sufficient = ( - self.cycles_by_structure_per_match(team_number_one, Queries.TELEOP_AMP) - + self.cycles_by_structure_per_match(team_number_two, Queries.TELEOP_AMP) - + self.cycles_by_structure_per_match(team_number_three, Queries.TELEOP_AMP) - ).apply(lambda total_teleop_amp: total_teleop_amp >= 1) # Should be able to put one down in the first 45 seconds, poor metric so should change later + self.cycles_by_structure_per_match(team_number, Queries.TELEOP_AMP) + ).apply(lambda total_teleop_amp: total_teleop_amp >= 1) return auto_amp_sufficient | teleop_amp_sufficient From 67e8e8c16122152d8ad78d8dd8a96200e030ec3c Mon Sep 17 00:00:00 2001 From: Shom770 Date: Fri, 9 Feb 2024 17:23:07 -0500 Subject: [PATCH 08/19] finished match dashboard --- src/page_managers/match_manager.py | 141 ++++++++++++++++++++++++++++- 1 file changed, 140 insertions(+), 1 deletion(-) diff --git a/src/page_managers/match_manager.py b/src/page_managers/match_manager.py index de376fe..5dc2289 100644 --- a/src/page_managers/match_manager.py +++ b/src/page_managers/match_manager.py @@ -304,7 +304,146 @@ def generate_match_prediction_graphs( combined_teams = red_alliance + blue_alliance display_cycle_contributions = type_of_graph == GraphType.CYCLE_CONTRIBUTIONS color_sequence = ["#781212", "#163ba1"] # Bright red # Bright blue - # TODO: Add match prediction graphs + + structure_breakdown_col, auto_cycles_col = st.columns(2) + teleop_cycles_col, cumulative_cycles_col = st.columns(2) + + # Breaks down where the different teams scored among the six teams + with structure_breakdown_col: + structure_breakdown = [ + [ + self.calculated_stats.cycles_by_structure_per_match( + team, structures + ).sum() + for team in combined_teams + ] + for structures in ( + (Queries.AUTO_AMP, Queries.TELEOP_AMP), + (Queries.AUTO_SPEAKER, Queries.TELEOP_SPEAKER), + Queries.TELEOP_TRAP + ) + ] + + plotly_chart( + stacked_bar_graph( + combined_teams, + structure_breakdown, + "Teams", + ["# of Amp Cycles", "# of Speaker Cycles", "# of Trap Cycles"], + "Total Cycles Scored into Structures", + title="Structure Breakdown", + color_map={ + "# of Amp Cycles": GeneralConstants.GOLD_GRADIENT[0], + "# of Speaker Cycles": GeneralConstants.GOLD_GRADIENT[1], + "# of Trap Cycles": GeneralConstants.GOLD_GRADIENT[2] + }, + ).update_layout(xaxis={"categoryorder": "total descending"}) + ) + + # Breaks down cycles/point contributions among both alliances in Autonomous. 
+ with auto_cycles_col: + auto_alliance_distributions = [] + + for alliance in (red_alliance, blue_alliance): + cycles_in_alliance = [ + ( + self.calculated_stats.cycles_by_match(team, Queries.AUTO) + if display_cycle_contributions + else self.calculated_stats.points_contributed_by_match( + team, Queries.AUTO + ) + ) + for team in alliance + ] + auto_alliance_distributions.append( + self.calculated_stats.cartesian_product( + *cycles_in_alliance, reduce_with_sum=True + ) + ) + + plotly_chart( + box_plot( + ["Red Alliance", "Blue Alliance"], + auto_alliance_distributions, + y_axis_label=( + "Notes Scored" + if display_cycle_contributions + else "Points Contributed" + ), + title=( + f"Notes During Autonomous (N={len(auto_alliance_distributions[0])})" + if display_cycle_contributions + else f"Points Contributed During Autonomous (N={len(auto_alliance_distributions[0])})" + ), + color_sequence=color_sequence, + ) + ) + + # Breaks down cycles/point contributions among both alliances in Teleop. + with teleop_cycles_col: + teleop_alliance_distributions = [] + + for alliance in (red_alliance, blue_alliance): + cycles_in_alliance = [ + ( + self.calculated_stats.cycles_by_match(team, Queries.TELEOP) + if display_cycle_contributions + else self.calculated_stats.points_contributed_by_match( + team, Queries.TELEOP + ) + ) + for team in alliance + ] + teleop_alliance_distributions.append( + self.calculated_stats.cartesian_product( + *cycles_in_alliance, reduce_with_sum=True + ) + ) + + plotly_chart( + box_plot( + ["Red Alliance", "Blue Alliance"], + teleop_alliance_distributions, + y_axis_label=( + "Notes Scored" + if display_cycle_contributions + else "Points Contributed" + ), + title=( + f"Notes During Teleop (N={len(teleop_alliance_distributions[0])})" + if display_cycle_contributions + else f"Points Contributed During Teleop (N={len(teleop_alliance_distributions[0])})" + ), + color_sequence=color_sequence, + ) + ) + + # Show cumulative cycles/point contributions (auto and teleop) + with cumulative_cycles_col: + cumulative_alliance_distributions = [ + auto_distribution + teleop_distribution + for auto_distribution, teleop_distribution in zip( + auto_alliance_distributions, teleop_alliance_distributions + ) + ] + + plotly_chart( + box_plot( + ["Red Alliance", "Blue Alliance"], + cumulative_alliance_distributions, + y_axis_label=( + "Notes Scored" + if display_cycle_contributions + else "Points Contributed" + ), + title=( + f"Notes During Auto + Teleop (N={len(cumulative_alliance_distributions[0])})" + if display_cycle_contributions + else f"Points Contributed During Auto + Teleop (N={len(cumulative_alliance_distributions[0])})" + ), + color_sequence=color_sequence, + ) + ) def generate_alliance_dashboard(self, team_numbers: list[int], color_gradient: list[str]) -> None: """Generates an alliance dashboard in the `Match` page. 
From aac40c47440f0107ddc2cc5b04e37d785c137e3c Mon Sep 17 00:00:00 2001 From: Shom770 Date: Fri, 9 Feb 2024 17:34:03 -0500 Subject: [PATCH 09/19] finished speaker/amp graphs --- src/page_managers/event_manager.py | 150 +++++++++++++++++++++++++++-- 1 file changed, 142 insertions(+), 8 deletions(-) diff --git a/src/page_managers/event_manager.py b/src/page_managers/event_manager.py index c8326e3..2f769c8 100644 --- a/src/page_managers/event_manager.py +++ b/src/page_managers/event_manager.py @@ -28,28 +28,52 @@ def __init__(self): ) @st.cache_data(ttl=GeneralConstants.SECONDS_TO_CACHE) - def _retrieve_cycle_distributions(_self, type_of_grid: str) -> list: + def _retrieve_cycle_distributions(_self, mode: str) -> list: """Retrieves cycle distributions across an event for autonomous/teleop. - :param type_of_grid: The mode to retrieve cycle data for (autonomous/teleop). + :param mode: The mode to retrieve cycle data for (autonomous/teleop). :return: A list containing the cycle distirbutions for each team. """ teams = retrieve_team_list() return [ - _self.calculated_stats.cycles_by_match(team, type_of_grid) + _self.calculated_stats.cycles_by_match(team, mode) for team in teams ] @st.cache_data(ttl=GeneralConstants.SECONDS_TO_CACHE) - def _retrieve_point_distributions(_self, type_of_grid: str) -> list: + def _retrieve_point_distributions(_self, mode: str) -> list: """Retrieves point distributions across an event for autonomous/teleop. - :param type_of_grid: The mode to retrieve point contribution data for (autonomous/teleop). - :return: A list containing the point distirbutions for each team. + :param mode: The mode to retrieve point contribution data for (autonomous/teleop). + :return: A list containing the point distributions for each team. """ teams = retrieve_team_list() return [ - _self.calculated_stats.points_contributed_by_match(team, type_of_grid) + _self.calculated_stats.points_contributed_by_match(team, mode) + for team in teams + ] + + @st.cache_data(ttl=GeneralConstants.SECONDS_TO_CACHE) + def _retrieve_speaker_cycle_distributions(_self) -> list: + """Retrieves the distribution of speaker cycles for each team across an event for auto/teleop. + + :return: A list containing the speaker cycle distributions for each team. + """ + teams = retrieve_team_list() + return [ + _self.calculated_stats.cycles_by_structure_per_match(team, (Queries.AUTO_SPEAKER, Queries.TELEOP_SPEAKER)) + for team in teams + ] + + @st.cache_data(ttl=GeneralConstants.SECONDS_TO_CACHE) + def _retrieve_amp_cycle_distributions(_self) -> list: + """Retrieves the distribution of amp cycles for each team across an event for auto/teleop. + + :return: A list containing the amp cycle distributions for each team. + """ + teams = retrieve_team_list() + return [ + _self.calculated_stats.cycles_by_structure_per_match(team, (Queries.AUTO_AMP, Queries.TELEOP_AMP)) for team in teams ] @@ -105,5 +129,115 @@ def generate_event_graphs(self, type_of_graph: str) -> None: """ display_cycle_contributions = type_of_graph == GraphType.CYCLE_CONTRIBUTIONS teams = retrieve_team_list() + auto_cycles_col, teleop_cycles_col = st.columns(2, gap="large") + speaker_cycles_col, amp_cycles_col= st.columns(2, gap="large") + + # Display event-wide graph surrounding each team and their cycle distributions with the Speaker. 
+ with speaker_cycles_col: + variable_key = f"speaker_cycles_col_{type_of_graph}" + + speaker_distributions = self._retrieve_speaker_cycle_distributions() + speaker_sorted_distributions = dict( + sorted( + zip(teams, speaker_distributions), + key=lambda pair: (pair[1].median(), pair[1].mean()), + reverse=True + ) + ) + + speaker_sorted_teams = list(speaker_sorted_distributions.keys()) + speaker_distributions = list(speaker_sorted_distributions.values()) + + if not st.session_state.get(variable_key): + st.session_state[variable_key] = 0 + + plotly_chart( + box_plot( + speaker_sorted_teams[ + st.session_state[variable_key]:st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY + ], + speaker_distributions[ + st.session_state[variable_key]:st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY + ], + x_axis_label="Teams", + y_axis_label=f"Cycle Distribution", + title=f"Cycle Contributions to the Speaker" + ).update_layout( + showlegend=False + ) + ) + + previous_col, next_col = st.columns(2) + + if previous_col.button( + f"Previous {self.TEAMS_TO_SPLIT_BY} Teams", + use_container_width=True, + key=f"prevSpeaker{type_of_graph}", + disabled=(st.session_state[variable_key] - self.TEAMS_TO_SPLIT_BY < 0) + ): + st.session_state[variable_key] -= self.TEAMS_TO_SPLIT_BY + st.experimental_rerun() + + if next_col.button( + f"Next {self.TEAMS_TO_SPLIT_BY} Teams", + use_container_width=True, + key=f"nextSpeaker{type_of_graph}", + disabled=(st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY >= len(teams)) + ): + st.session_state[variable_key] += self.TEAMS_TO_SPLIT_BY + st.experimental_rerun() + + # Display event-wide graph surrounding each team and their cycle contributions to the Amp. + with amp_cycles_col: + variable_key = f"amp_cycles_col_{type_of_graph}" + + amp_distributions = self._retrieve_amp_cycle_distributions() + amp_sorted_distributions = dict( + sorted( + zip(teams, amp_distributions), + key=lambda pair: (pair[1].median(), pair[1].mean()), + reverse=True + ) + ) + + amp_sorted_teams = list(amp_sorted_distributions.keys()) + amp_distributions = list(amp_sorted_distributions.values()) + + if not st.session_state.get(variable_key): + st.session_state[variable_key] = 0 + + plotly_chart( + box_plot( + amp_sorted_teams[ + st.session_state[variable_key]:st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY + ], + amp_distributions[ + st.session_state[variable_key]:st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY + ], + x_axis_label="Teams", + y_axis_label=f"Cycle Distribution", + title=f"Cycle Contributions to the Amp" + ).update_layout( + showlegend=False + ) + ) + + previous_col, next_col = st.columns(2) + + if previous_col.button( + f"Previous {self.TEAMS_TO_SPLIT_BY} Teams", + use_container_width=True, + key=f"prevAmp{type_of_graph}", + disabled=(st.session_state[variable_key] - self.TEAMS_TO_SPLIT_BY < 0) + ): + st.session_state[variable_key] -= self.TEAMS_TO_SPLIT_BY + st.experimental_rerun() - # TODO: Add event graphs \ No newline at end of file + if next_col.button( + f"Next {self.TEAMS_TO_SPLIT_BY} Teams", + use_container_width=True, + key=f"nextAmp{type_of_graph}", + disabled=(st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY >= len(teams)) + ): + st.session_state[variable_key] += self.TEAMS_TO_SPLIT_BY + st.experimental_rerun() \ No newline at end of file From fb04768c5ef3577ea5aa5d57bc06a72025985006 Mon Sep 17 00:00:00 2001 From: 00magikarp <94652654+00magikarp@users.noreply.github.com> Date: Sun, 11 Feb 2024 17:59:13 -0500 Subject: [PATCH 10/19] add metrics: 
trap-score ability, times climbed, harmonize ability --- src/page_managers/team_manager.py | 46 ++++++++++++++++++++++++++++--- 1 file changed, 42 insertions(+), 4 deletions(-) diff --git a/src/page_managers/team_manager.py b/src/page_managers/team_manager.py index 9fe9da8..995ca54 100644 --- a/src/page_managers/team_manager.py +++ b/src/page_managers/team_manager.py @@ -50,7 +50,7 @@ def generate_metrics(self, team_number: int) -> None: :param team_number: The team number to calculate the metrics for. """ points_contributed_col, drivetrain_col, auto_cycle_col, teleop_cycle_col = st.columns(4) - iqr_col, auto_engage_col, auto_engage_accuracy_col, auto_accuracy_col = st.columns(4) + iqr_col, trap_ability_col, times_climbed_col, harmonize_ability_col = st.columns(4) # Metric for avg. points contributed with points_contributed_col: @@ -115,6 +115,47 @@ def generate_metrics(self, team_number: int) -> None: threshold=teleop_cycles_for_percentile ) + # TODO: the next 3 metrics are likely wrong + # Metric for ability to score trap + with trap_ability_col: + trap_ability = self.calculated_stats.average_stat( + team_number, + Queries.TELEOP_TRAP, + Criteria.BOOLEAN_CRITERIA + ) + colored_metric( + "Able To Score Trap", + "Yes" if trap_ability > 0 else "No", + ) + + # Metric for total times climbed + with times_climbed_col: + times_climbed = self.calculated_stats.cumulative_stat( + team_number, + Queries.CLIMBED_CHAIN, + ) + times_climbed_for_percentile = self.calculated_stats.quantile_stat( + 0.5, + lambda self, team: self.cumulative_stat(team, Queries.CLIMBED_CHAIN) + ) + colored_metric( + "Times Climbed", + times_climbed, + threshold=times_climbed_for_percentile + ) + + # Metric for ability to harmonize + with harmonize_ability_col: + harmonize_ability = self.calculated_stats.average_stat( + team_number, + Queries.HARMONIZED_ON_CHAIN, + Criteria.BOOLEAN_CRITERIA + ) + colored_metric( + "Able To Harmonize", + "Yes" if harmonize_ability > 0 else "No" + ) + # Metric for IQR of points contributed (consistency) with iqr_col: team_dataset = self.calculated_stats.points_contributed_by_match( @@ -135,9 +176,6 @@ def generate_metrics(self, team_number: int) -> None: invert_threshold=True ) - # TODO: Add the other metrics - - def generate_autonomous_graphs( self, team_number: int, From f22c621da2db1668ca17eb1a57abb607d7228478 Mon Sep 17 00:00:00 2001 From: SayanC4 <103901872+SayanC4@users.noreply.github.com> Date: Sun, 11 Feb 2024 23:38:16 -0500 Subject: [PATCH 11/19] Add auto graph --- src/page_managers/event_manager.py | 69 +++++++++++++++++++++++++++++- 1 file changed, 68 insertions(+), 1 deletion(-) diff --git a/src/page_managers/event_manager.py b/src/page_managers/event_manager.py index 2f769c8..e008440 100644 --- a/src/page_managers/event_manager.py +++ b/src/page_managers/event_manager.py @@ -77,6 +77,18 @@ def _retrieve_amp_cycle_distributions(_self) -> list: for team in teams ] + @st.cache_data(ttl=GeneralConstants.SECONDS_TO_CACHE) + def _retrieve_auto_cycle_distributions(_self) -> list: + """Retrieves the distribution of autonomous cycles for each team across an event with speaker/amp. + + :return: A list containing the autonomous cycle distributions for each team. 
+ """ + teams = retrieve_team_list() + return [ + _self.calculated_stats.cycles_by_structure_per_match(team, (Queries.AUTO_SPEAKER, Queries.AUTO_AMP)) + for team in teams + ] + def generate_input_section(self) -> None: """Defines that there are no inputs for the event page, showing event-wide graphs.""" return @@ -240,4 +252,59 @@ def generate_event_graphs(self, type_of_graph: str) -> None: disabled=(st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY >= len(teams)) ): st.session_state[variable_key] += self.TEAMS_TO_SPLIT_BY - st.experimental_rerun() \ No newline at end of file + st.experimental_rerun() + + # Display event-wide graph surrounding each team and their cycle distributions in the Autonomous period. + with auto_cycles_col: + variable_key = f"auto_cycles_col_{type_of_graph}" + + auto_distributions = self._retrieve_auto_cycle_distributions() + auto_sorted_distributions = dict( + sorted( + zip(teams, auto_distributions), + key=lambda pair: (pair[1].median(), pair[1].mean()), + reverse=True + ) + ) + + auto_sorted_teams = list(auto_sorted_distributions.keys()) + auto_distributions = list(auto_sorted_distributions.values()) + + if not st.session_state.get(variable_key): + st.session_state[variable_key] = 0 + + plotly_chart( + box_plot( + auto_sorted_teams[ + st.session_state[variable_key]:st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY + ], + auto_distributions[ + st.session_state[variable_key]:st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY + ], + x_axis_label="Teams", + y_axis_label=f"Cycle Distribution", + title=f"Cycle Contributions in Auto" + ).update_layout( + showlegend=False + ) + ) + + previous_col, next_col = st.columns(2) + + if previous_col.button( + f"Previous {self.TEAMS_TO_SPLIT_BY} Teams", + use_container_width=True, + key=f"prevAuto{type_of_graph}", + disabled=(st.session_state[variable_key] - self.TEAMS_TO_SPLIT_BY < 0) + ): + st.session_state[variable_key] -= self.TEAMS_TO_SPLIT_BY + st.experimental_rerun() + + if next_col.button( + f"Next {self.TEAMS_TO_SPLIT_BY} Teams", + use_container_width=True, + key=f"nextAuto{type_of_graph}", + disabled=(st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY >= len(teams)) + ): + st.session_state[variable_key] += self.TEAMS_TO_SPLIT_BY + st.experimental_rerun() From e1f207cbb47beb6d4de49dbf75cc3e9950b8de6e Mon Sep 17 00:00:00 2001 From: CodingMaster121 Date: Mon, 12 Feb 2024 00:12:02 -0500 Subject: [PATCH 12/19] Added total teleop graph --- src/page_managers/event_manager.py | 64 ++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) diff --git a/src/page_managers/event_manager.py b/src/page_managers/event_manager.py index 2f769c8..ed75bae 100644 --- a/src/page_managers/event_manager.py +++ b/src/page_managers/event_manager.py @@ -77,6 +77,13 @@ def _retrieve_amp_cycle_distributions(_self) -> list: for team in teams ] + @st.cache_data(ttl=GeneralConstants.SECONDS_TO_CACHE) + def _retrieve_teleop_distributions(_self) -> list: + teams = retrieve_team_list() + return [ + _self.calculated_stats.cycles_by_match(team, Queries.TELEOP) for team in teams + ] + def generate_input_section(self) -> None: """Defines that there are no inputs for the event page, showing event-wide graphs.""" return @@ -238,6 +245,63 @@ def generate_event_graphs(self, type_of_graph: str) -> None: use_container_width=True, key=f"nextAmp{type_of_graph}", disabled=(st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY >= len(teams)) + ): + st.session_state[variable_key] += self.TEAMS_TO_SPLIT_BY + st.experimental_rerun() + + 
with teleop_cycles_col: + variable_key = f"teleop_cycles_col_{type_of_graph}" + + teleop_distributions = ( + self._retrieve_teleop_distributions() + ) + + teleop_sorted_distributions = dict( + sorted( + zip(teams, teleop_distributions), + key=lambda pair: (pair[1].median(), pair[1].mean()), + reverse=True + ) + ) + + teleop_sorted_teams = list(teleop_sorted_distributions.keys()) + teleop_distributions = list(teleop_sorted_distributions.values()) + + if not st.session_state.get(variable_key): + st.session_state[variable_key] = 0 + + plotly_chart( + box_plot( + teleop_sorted_teams[ + st.session_state[variable_key]:st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY + ], + teleop_distributions[ + st.session_state[variable_key]:st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY + ], + x_axis_label="Teams", + y_axis_label=f"Teleop {'Cycle' if display_cycle_contributions else 'Point'} Distribution", + title=f"Teleop {'Cycle' if display_cycle_contributions else 'Point'} Contributions" + ).update_layout( + showlegend=False + ) + ) + + previous_col, next_col = st.columns(2) + + if previous_col.button( + f"Previous {self.TEAMS_TO_SPLIT_BY} Teams", + use_container_width=True, + key=f"prevTele{type_of_graph}", + disabled=(st.session_state[variable_key] - self.TEAMS_TO_SPLIT_BY < 0) + ): + st.session_state[variable_key] -= self.TEAMS_TO_SPLIT_BY + st.experimental_rerun() + + if next_col.button( + f"Next {self.TEAMS_TO_SPLIT_BY} Teams", + use_container_width=True, + key=f"nextTele{type_of_graph}", + disabled=(st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY >= len(teams)) ): st.session_state[variable_key] += self.TEAMS_TO_SPLIT_BY st.experimental_rerun() \ No newline at end of file From 6a1bdb8016852c9356a35b2d218c166cc8e5473b Mon Sep 17 00:00:00 2001 From: Shom770 Date: Mon, 12 Feb 2024 14:08:07 -0500 Subject: [PATCH 13/19] change to multi-line graph --- src/page_managers/match_manager.py | 50 +++++++++++++++++++++++++++++- src/utils/calculated_stats.py | 2 +- src/utils/constants.py | 6 ++-- 3 files changed, 54 insertions(+), 4 deletions(-) diff --git a/src/page_managers/match_manager.py b/src/page_managers/match_manager.py index 5dc2289..0974910 100644 --- a/src/page_managers/match_manager.py +++ b/src/page_managers/match_manager.py @@ -6,7 +6,7 @@ from scipy.stats import norm from .page_manager import PageManager -from utils import ( +from ..utils import ( alliance_breakdown, bar_graph, box_plot, @@ -612,3 +612,51 @@ def generate_teleop_graphs( :return: """ display_cycle_contributions = type_of_graph == GraphType.CYCLE_CONTRIBUTIONS + speaker_cycles_over_time_col, amp_periods_over_time_col = st.columns(2, gap="large") + + # Display the teleop speaker cycles of each team over time + with speaker_cycles_over_time_col: + cycles_by_team = [ + self.calculated_stats.cycles_by_structure_per_match(team, Queries.TELEOP_SPEAKER) * + ( + 1 if display_cycle_contributions else 2 + ) + for team in team_numbers + ] + + plotly_chart( + multi_line_graph( + *populate_missing_data(cycles_by_team), + x_axis_label="Match Index", + y_axis_label=team_numbers, + y_axis_title=( + "# of Cycles" + if display_cycle_contributions + else "Points Contributed" + ), + title=( + "Teleop Speaker Cycles Over Time" + if display_cycle_contributions + else "Points Contributed in the Speaker Over Time" + ), + color_map=dict(zip(team_numbers, color_gradient)) + ) + ) + + # Display the teleop speaker cycles of each team over time + with amp_periods_over_time_col: + amp_periods_by_team = [ + 
self.calculated_stats.potential_amplification_periods_by_match(team) + for team in team_numbers + ] + + plotly_chart( + multi_line_graph( + *populate_missing_data(amp_periods_by_team), + x_axis_label="Match Index", + y_axis_label=team_numbers, + y_axis_title="# of Potential Amplification Periods", + title="Potential Amplification Periods Produced by Alliance", + color_map=dict(zip(team_numbers, color_gradient)) + ) + ) diff --git a/src/utils/calculated_stats.py b/src/utils/calculated_stats.py index 2aa76ff..37957d5 100644 --- a/src/utils/calculated_stats.py +++ b/src/utils/calculated_stats.py @@ -6,7 +6,7 @@ from numpy import percentile from pandas import DataFrame, Series, isna -from .constants import Criteria, Queries +from .constants import Criteria, GeneralConstants, Queries from .functions import scouting_data_for_team, retrieve_team_list, retrieve_pit_scouting_data __all__ = ["CalculatedStats"] diff --git a/src/utils/constants.py b/src/utils/constants.py index afe1de5..8409d38 100644 --- a/src/utils/constants.py +++ b/src/utils/constants.py @@ -27,6 +27,7 @@ class GeneralConstants: BLUE_ALLIANCE_GRADIENT = ["#0b2e61", "#355687", "#7da0d1", "#a8c1e3"] GOLD_GRADIENT = ["#ffbd4d", "#ff9000", "#dd5f00"] LEVEL_GRADIENT = ["#f44a53", "#ff8800", "#f4c717"] + # Colors DARK_RED = "#450a0a" DARK_BLUE = "#172554" @@ -36,8 +37,9 @@ class GeneralConstants: CONE_COLOR = PRIMARY_COLOR CUBE_COLOR = "#4F46E5" - # Game-specific constants - CHARGE_STATION_LENGTH = 8 # In feet + # General game constants + TELEOP_TOTAL_TIME = (2 * 60 + 15) + TELEOP_MINUS_ENDGAME = TELEOP_TOTAL_TIME - 20 class EventSpecificConstants: From 3dd8960457b0f6789e970195ed4fd43809bbefa2 Mon Sep 17 00:00:00 2001 From: Shom770 Date: Mon, 12 Feb 2024 18:01:42 -0500 Subject: [PATCH 14/19] team page revitalized --- src/page_managers/team_manager.py | 86 +++++++++++++++++++------------ 1 file changed, 53 insertions(+), 33 deletions(-) diff --git a/src/page_managers/team_manager.py b/src/page_managers/team_manager.py index 2bcd844..b93af54 100644 --- a/src/page_managers/team_manager.py +++ b/src/page_managers/team_manager.py @@ -114,31 +114,64 @@ def generate_metrics(self, team_number: int) -> None: # Metric for average teleop cycles with teleop_cycle_col: - average_teleop_cycles = self.calculated_stats.average_cycles( + average_teleop_speaker_cycles = self.calculated_stats.average_cycles_for_structure( team_number, - Queries.TELEOP + Queries.TELEOP_SPEAKER ) - teleop_cycles_for_percentile = self.calculated_stats.quantile_stat( + average_teleop_amp_cycles = self.calculated_stats.average_cycles_for_structure( + team_number, + Queries.TELEOP_AMP + ) + average_teleop_speaker_cycles_for_percentile = self.calculated_stats.quantile_stat( 0.5, - lambda self, team: self.average_cycles(team, Queries.TELEOP) + lambda self, team: self.average_cycles_for_structure(team, Queries.TELEOP_SPEAKER) ) - colored_metric( + average_teleop_amp_cycles_for_percentile = self.calculated_stats.quantile_stat( + 0.5, + lambda self, team: self.average_cycles_for_structure(team, Queries.TELEOP_AMP) + ) + + colored_metric_with_two_values( "Average Teleop Cycles", - round(average_teleop_cycles, 2), - threshold=teleop_cycles_for_percentile + "Speaker / Amp", + round(average_teleop_speaker_cycles, 2), + round(average_teleop_amp_cycles, 2), + first_threshold=average_teleop_speaker_cycles_for_percentile, + second_threshold=average_teleop_amp_cycles_for_percentile + ) + + # Metric for IQR of points contributed (consistency) + with iqr_col: + team_dataset = 
self.calculated_stats.points_contributed_by_match( + team_number + ) + iqr_of_points_contributed = self.calculated_stats.calculate_iqr(team_dataset) + iqr_for_percentile = self.calculated_stats.quantile_stat( + 0.5, + lambda self, team: self.calculate_iqr( + self.points_contributed_by_match(team) + ) + ) + + colored_metric( + "IQR of Points Contributed", + iqr_of_points_contributed, + threshold=iqr_for_percentile, + invert_threshold=True ) - # TODO: the next 3 metrics are likely wrong # Metric for ability to score trap with trap_ability_col: - trap_ability = self.calculated_stats.average_stat( + average_trap_cycles = self.calculated_stats.average_stat( team_number, Queries.TELEOP_TRAP, Criteria.BOOLEAN_CRITERIA ) colored_metric( - "Able To Score Trap", - "Yes" if trap_ability > 0 else "No", + "Can they score in the trap?", + average_trap_cycles, + threshold=0.01, + value_formatter=lambda value: "Yes" if value > 0 else "No" ) # Metric for total times climbed @@ -146,47 +179,34 @@ def generate_metrics(self, team_number: int) -> None: times_climbed = self.calculated_stats.cumulative_stat( team_number, Queries.CLIMBED_CHAIN, + Criteria.BOOLEAN_CRITERIA ) times_climbed_for_percentile = self.calculated_stats.quantile_stat( 0.5, - lambda self, team: self.cumulative_stat(team, Queries.CLIMBED_CHAIN) + lambda self, team: self.cumulative_stat(team, Queries.CLIMBED_CHAIN, Criteria.BOOLEAN_CRITERIA) ) colored_metric( - "Times Climbed", + "# of Times Climbed", times_climbed, threshold=times_climbed_for_percentile ) # Metric for ability to harmonize with harmonize_ability_col: - harmonize_ability = self.calculated_stats.average_stat( + times_harmonized = self.calculated_stats.cumulative_stat( team_number, Queries.HARMONIZED_ON_CHAIN, Criteria.BOOLEAN_CRITERIA ) - colored_metric( - "Able To Harmonize", - "Yes" if harmonize_ability > 0 else "No" - ) - - # Metric for IQR of points contributed (consistency) - with iqr_col: - team_dataset = self.calculated_stats.points_contributed_by_match( - team_number - ) - iqr_of_points_contributed = self.calculated_stats.calculate_iqr(team_dataset) - iqr_for_percentile = self.calculated_stats.quantile_stat( + times_harmonized_for_percentile = self.calculated_stats.quantile_stat( 0.5, - lambda self, team: self.calculate_iqr( - self.points_contributed_by_match(team) - ) + lambda self, team: self.cumulative_stat(team, Queries.HARMONIZED_ON_CHAIN, Criteria.BOOLEAN_CRITERIA) ) colored_metric( - "IQR of Points Contributed", - iqr_of_points_contributed, - threshold=iqr_for_percentile, - invert_threshold=True + "# of Times Harmonized", + times_harmonized, + threshold=times_harmonized_for_percentile ) def generate_autonomous_graphs( From 897f7d4f2b47875f6489bb32b37a805ccee9c8c7 Mon Sep 17 00:00:00 2001 From: Shom770 Date: Tue, 13 Feb 2024 12:32:40 -0500 Subject: [PATCH 15/19] finished auto graphs --- src/page_managers/event_manager.py | 207 +++++++++++++++++++++++++- src/page_managers/picklist_manager.py | 4 + src/page_managers/team_manager.py | 49 +++++- src/pages/4_Picklist.py | 3 + src/utils/constants.py | 2 + 5 files changed, 254 insertions(+), 11 deletions(-) diff --git a/src/page_managers/event_manager.py b/src/page_managers/event_manager.py index c8326e3..fbd2740 100644 --- a/src/page_managers/event_manager.py +++ b/src/page_managers/event_manager.py @@ -28,28 +28,52 @@ def __init__(self): ) @st.cache_data(ttl=GeneralConstants.SECONDS_TO_CACHE) - def _retrieve_cycle_distributions(_self, type_of_grid: str) -> list: + def _retrieve_cycle_distributions(_self, mode: str) -> 
list: """Retrieves cycle distributions across an event for autonomous/teleop. - :param type_of_grid: The mode to retrieve cycle data for (autonomous/teleop). + :param mode: The mode to retrieve cycle data for (autonomous/teleop). :return: A list containing the cycle distirbutions for each team. """ teams = retrieve_team_list() return [ - _self.calculated_stats.cycles_by_match(team, type_of_grid) + _self.calculated_stats.cycles_by_match(team, mode) for team in teams ] @st.cache_data(ttl=GeneralConstants.SECONDS_TO_CACHE) - def _retrieve_point_distributions(_self, type_of_grid: str) -> list: + def _retrieve_point_distributions(_self, mode: str) -> list: """Retrieves point distributions across an event for autonomous/teleop. - :param type_of_grid: The mode to retrieve point contribution data for (autonomous/teleop). - :return: A list containing the point distirbutions for each team. + :param mode: The mode to retrieve point contribution data for (autonomous/teleop). + :return: A list containing the point distributions for each team. """ teams = retrieve_team_list() return [ - _self.calculated_stats.points_contributed_by_match(team, type_of_grid) + _self.calculated_stats.points_contributed_by_match(team, mode) + for team in teams + ] + + @st.cache_data(ttl=GeneralConstants.SECONDS_TO_CACHE) + def _retrieve_speaker_cycle_distributions(_self) -> list: + """Retrieves the distribution of speaker cycles for each team across an event for auto/teleop. + + :return: A list containing the speaker cycle distributions for each team. + """ + teams = retrieve_team_list() + return [ + _self.calculated_stats.cycles_by_structure_per_match(team, (Queries.AUTO_SPEAKER, Queries.TELEOP_SPEAKER)) + for team in teams + ] + + @st.cache_data(ttl=GeneralConstants.SECONDS_TO_CACHE) + def _retrieve_amp_cycle_distributions(_self) -> list: + """Retrieves the distribution of amp cycles for each team across an event for auto/teleop. + + :return: A list containing the amp cycle distributions for each team. + """ + teams = retrieve_team_list() + return [ + _self.calculated_stats.cycles_by_structure_per_match(team, (Queries.AUTO_AMP, Queries.TELEOP_AMP)) for team in teams ] @@ -106,4 +130,171 @@ def generate_event_graphs(self, type_of_graph: str) -> None: display_cycle_contributions = type_of_graph == GraphType.CYCLE_CONTRIBUTIONS teams = retrieve_team_list() - # TODO: Add event graphs \ No newline at end of file + auto_cycles_col, teleop_cycles_col = st.columns(2, gap="large") + speaker_cycles_col, amp_cycles_col = st.columns(2, gap="large") + + + # Display event-wide graph surrounding each team and their cycle distributions in the Autonomous period. 
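# The sort key used in each panel below orders teams primarily by their median cycle count and
# only falls back to the mean to break ties, so a single outlier match does not vault a team up
# the event-wide box plots. Tiny illustration with made-up Series (not real event data):
from pandas import Series

teams = [254, 4099, 118]
cycles = [Series([2, 3, 3]), Series([3, 3, 10]), Series([1, 6, 6])]
ranked = sorted(zip(teams, cycles), key=lambda pair: (pair[1].median(), pair[1].mean()), reverse=True)
print([team for team, _ in ranked])  # [118, 4099, 254]: medians 6, 3, 3; the tied 3s split by means 5.33 vs 2.67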
+ with auto_cycles_col: + variable_key = f"auto_cycles_col_{type_of_graph}" + + auto_distributions = self._retrieve_cycle_distributions(Queries.AUTO) + auto_sorted_distributions = dict( + sorted( + zip(teams, auto_distributions), + key=lambda pair: (pair[1].median(), pair[1].mean()), + reverse=True + ) + ) + + auto_sorted_teams = list(auto_sorted_distributions.keys()) + auto_distributions = list(auto_sorted_distributions.values()) + + if not st.session_state.get(variable_key): + st.session_state[variable_key] = 0 + + plotly_chart( + box_plot( + auto_sorted_teams[ + st.session_state[variable_key]:st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY + ], + auto_distributions[ + st.session_state[variable_key]:st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY + ], + x_axis_label="Teams", + y_axis_label=f"Cycle Distribution", + title=f"Cycle Contributions in Auto" + ).update_layout( + showlegend=False + ) + ) + + previous_col, next_col = st.columns(2) + + if previous_col.button( + f"Previous {self.TEAMS_TO_SPLIT_BY} Teams", + use_container_width=True, + key=f"prevAuto{type_of_graph}", + disabled=(st.session_state[variable_key] - self.TEAMS_TO_SPLIT_BY < 0) + ): + st.session_state[variable_key] -= self.TEAMS_TO_SPLIT_BY + st.experimental_rerun() + + if next_col.button( + f"Next {self.TEAMS_TO_SPLIT_BY} Teams", + use_container_width=True, + key=f"nextAuto{type_of_graph}", + disabled=(st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY >= len(teams)) + ): + st.session_state[variable_key] += self.TEAMS_TO_SPLIT_BY + st.experimental_rerun() + + # Display event-wide graph surrounding each team and their cycle distributions with the Speaker. + with speaker_cycles_col: + variable_key = f"speaker_cycles_col_{type_of_graph}" + + speaker_distributions = self._retrieve_speaker_cycle_distributions() + speaker_sorted_distributions = dict( + sorted( + zip(teams, speaker_distributions), + key=lambda pair: (pair[1].median(), pair[1].mean()), + reverse=True + ) + ) + + speaker_sorted_teams = list(speaker_sorted_distributions.keys()) + speaker_distributions = list(speaker_sorted_distributions.values()) + + if not st.session_state.get(variable_key): + st.session_state[variable_key] = 0 + + plotly_chart( + box_plot( + speaker_sorted_teams[ + st.session_state[variable_key]:st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY + ], + speaker_distributions[ + st.session_state[variable_key]:st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY + ], + x_axis_label="Teams", + y_axis_label=f"Cycle Distribution", + title=f"Cycle Contributions to the Speaker" + ).update_layout( + showlegend=False + ) + ) + + previous_col, next_col = st.columns(2) + + if previous_col.button( + f"Previous {self.TEAMS_TO_SPLIT_BY} Teams", + use_container_width=True, + key=f"prevSpeaker{type_of_graph}", + disabled=(st.session_state[variable_key] - self.TEAMS_TO_SPLIT_BY < 0) + ): + st.session_state[variable_key] -= self.TEAMS_TO_SPLIT_BY + st.experimental_rerun() + + if next_col.button( + f"Next {self.TEAMS_TO_SPLIT_BY} Teams", + use_container_width=True, + key=f"nextSpeaker{type_of_graph}", + disabled=(st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY >= len(teams)) + ): + st.session_state[variable_key] += self.TEAMS_TO_SPLIT_BY + st.experimental_rerun() + + # Display event-wide graph surrounding each team and their cycle contributions to the Amp. 
+ with amp_cycles_col: + variable_key = f"amp_cycles_col_{type_of_graph}" + + amp_distributions = self._retrieve_amp_cycle_distributions() + amp_sorted_distributions = dict( + sorted( + zip(teams, amp_distributions), + key=lambda pair: (pair[1].median(), pair[1].mean()), + reverse=True + ) + ) + + amp_sorted_teams = list(amp_sorted_distributions.keys()) + amp_distributions = list(amp_sorted_distributions.values()) + + if not st.session_state.get(variable_key): + st.session_state[variable_key] = 0 + + plotly_chart( + box_plot( + amp_sorted_teams[ + st.session_state[variable_key]:st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY + ], + amp_distributions[ + st.session_state[variable_key]:st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY + ], + x_axis_label="Teams", + y_axis_label=f"Cycle Distribution", + title=f"Cycle Contributions to the Amp" + ).update_layout( + showlegend=False + ) + ) + + previous_col, next_col = st.columns(2) + + if previous_col.button( + f"Previous {self.TEAMS_TO_SPLIT_BY} Teams", + use_container_width=True, + key=f"prevAmp{type_of_graph}", + disabled=(st.session_state[variable_key] - self.TEAMS_TO_SPLIT_BY < 0) + ): + st.session_state[variable_key] -= self.TEAMS_TO_SPLIT_BY + st.experimental_rerun() + + if next_col.button( + f"Next {self.TEAMS_TO_SPLIT_BY} Teams", + use_container_width=True, + key=f"nextAmp{type_of_graph}", + disabled=(st.session_state[variable_key] + self.TEAMS_TO_SPLIT_BY >= len(teams)) + ): + st.session_state[variable_key] += self.TEAMS_TO_SPLIT_BY + st.experimental_rerun() diff --git a/src/page_managers/picklist_manager.py b/src/page_managers/picklist_manager.py index 19f9ad4..0c6ecd1 100644 --- a/src/page_managers/picklist_manager.py +++ b/src/page_managers/picklist_manager.py @@ -3,11 +3,14 @@ from functools import partial import streamlit as st +from dotenv import load_dotenv from pandas import DataFrame from .page_manager import PageManager from utils import CalculatedStats, Queries, retrieve_scouting_data, retrieve_team_list +load_dotenv() + class PicklistManager(PageManager): """The page manager for the `Picklist` page.""" @@ -18,6 +21,7 @@ def __init__(self): retrieve_scouting_data() ) self.teams = retrieve_team_list() + self.client = Client(auth=os.getenv("NOTION_TOKEN")) # Requested stats is used to define the stats wanted in the picklist generation. self.requested_stats = { diff --git a/src/page_managers/team_manager.py b/src/page_managers/team_manager.py index b93af54..1e24dfb 100644 --- a/src/page_managers/team_manager.py +++ b/src/page_managers/team_manager.py @@ -14,6 +14,7 @@ GeneralConstants, GraphType, line_graph, + multi_line_graph, plotly_chart, Queries, retrieve_team_list, @@ -40,9 +41,11 @@ def generate_input_section(self) -> int: :return: The team number selected to create graphs for. """ + queried_team = int(st.experimental_get_query_params().get("team_number", [0])[0]) return st.selectbox( "Team Number", - retrieve_team_list() + (team_list := retrieve_team_list()), + index=team_list.index(queried_team) if queried_team in team_list else 0 ) def generate_metrics(self, team_number: int) -> None: @@ -220,10 +223,50 @@ def generate_autonomous_graphs( :param type_of_graph: The type of graph to use for the graphs on said page (cycle contribution / point contributions). 
:return: """ - team_data = scouting_data_for_team(team_number) using_cycle_contributions = type_of_graph == GraphType.CYCLE_CONTRIBUTIONS - # TODO: Add autonomous graphs + # Metric for how many times they left the starting zone + times_left_starting_zone = self.calculated_stats.cumulative_stat( + team_number, + Queries.LEFT_STARTING_ZONE, + Criteria.BOOLEAN_CRITERIA + ) + times_left_for_percentile = self.calculated_stats.quantile_stat( + 0.5, + lambda self, team: self.cumulative_stat(team, Queries.LEFT_STARTING_ZONE, Criteria.BOOLEAN_CRITERIA) + ) + + colored_metric( + "# of Leaves from the Starting Zone", + times_left_starting_zone, + threshold=times_left_for_percentile + ) + + # Speaker/amp over time graph + speaker_cycles_by_match = self.calculated_stats.cycles_by_structure_per_match( + team_number, + Queries.AUTO_SPEAKER + ) * (1 if using_cycle_contributions else 5) + amp_cycles_by_match = self.calculated_stats.cycles_by_structure_per_match( + team_number, + Queries.AUTO_AMP + ) * (1 if using_cycle_contributions else 2) + line_names = [ + ("# of Speaker Cycles" if using_cycle_contributions else "# of Speaker Points"), + ("# of Amp Cycles" if using_cycle_contributions else "# of Amp Points") + ] + + plotly_chart( + multi_line_graph( + range(len(speaker_cycles_by_match)), + [speaker_cycles_by_match, amp_cycles_by_match], + x_axis_label="Match Index", + y_axis_label=line_names, + y_axis_title=f"# of Autonomous {'Cycles' if using_cycle_contributions else 'Points'}", + title=f"Speaker/Amp {'Cycles' if using_cycle_contributions else 'Points'} During Autonomous Over Time", + color_map=dict(zip(line_names, (GeneralConstants.GOLD_GRADIENT[0], GeneralConstants.GOLD_GRADIENT[-1]))) + ) + ) def generate_teleop_graphs( self, diff --git a/src/pages/4_Picklist.py b/src/pages/4_Picklist.py index 3a50433..b27830c 100644 --- a/src/pages/4_Picklist.py +++ b/src/pages/4_Picklist.py @@ -31,6 +31,9 @@ key='download-csv' ) + if st.button("📝 Write to Notion Picklist"): + picklist_manager.write_to_notion(generated_picklist) + diff --git a/src/utils/constants.py b/src/utils/constants.py index fa5f838..c62b283 100644 --- a/src/utils/constants.py +++ b/src/utils/constants.py @@ -46,10 +46,12 @@ class EventSpecificConstants: """Constants specific to an event.""" EVENT_CODE = "2024vaash" + EVENT_NAME = "Ashland" URL = f"https://raw.githubusercontent.com/team4099/ScoutingAppData/main/{EVENT_CODE}_match_data.json" PIT_SCOUTING_URL = ( f"https://raw.githubusercontent.com/team4099/ScoutingAppData/main/{EVENT_CODE}_pit_scouting_data.csv" ) + PICKLIST_URL = "https://www.notion.so/team4099/42836f096b83453e8f284956799be386?v=a20940a6d4bb4e9bb233a6581c2bf65a" class GraphType(Enum): From ec35a9b997de75fc5367e1b887e3aa9fc7c1f9ec Mon Sep 17 00:00:00 2001 From: Shilab66 <89350258+Shilab66@users.noreply.github.com> Date: Tue, 13 Feb 2024 22:48:55 -0500 Subject: [PATCH 16/19] fix team page --- src/Teams.py | 10 +- src/page_managers/match_manager.py | 2 +- src/page_managers/team_manager.py | 141 ++++++++++++++++++++++++++++- src/requirements.txt | 2 +- src/utils/calculated_stats.py | 22 +++-- src/utils/functions.py | 3 +- src/utils/graphing.py | 2 + 7 files changed, 164 insertions(+), 18 deletions(-) diff --git a/src/Teams.py b/src/Teams.py index 9ea8dc8..fff8bca 100644 --- a/src/Teams.py +++ b/src/Teams.py @@ -20,8 +20,8 @@ # Generate the input section of the `Teams` page. 
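# `generate_input_section` (revised in the team_manager.py hunk above) now looks for an optional
# ?team_number= query parameter so a URL can deep-link straight to a team, falling back to the
# first team in the list otherwise. A minimal sketch of that lookup, with a placeholder team
# list standing in for retrieve_team_list():
import streamlit as st

team_list = [422, 612, 4099]  # placeholder; the app builds this from scouting data
queried_team = int(st.experimental_get_query_params().get("team_number", [0])[0])
team_number = st.selectbox(
    "Team Number",
    team_list,
    index=team_list.index(queried_team) if queried_team in team_list else 0,
)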
team_number = team_manager.generate_input_section() - metric_tab, auto_graphs_tab, teleop_graphs_tab = st.tabs( - ["📊 Metrics", "🤖 Autonomous Graphs", "🎮 Teleop + Endgame Graphs"] + metric_tab, auto_graphs_tab, teleop_graphs_tab, qualitative_graphs_tab = st.tabs( + ["📊 Metrics", "🤖 Autonomous Graphs", "🎮 Teleop + Endgame Graphs", "📝 Qualitative Graphs"] ) with metric_tab: @@ -69,3 +69,9 @@ team_number, type_of_graph=GraphType.POINT_CONTRIBUTIONS ) + + with qualitative_graphs_tab: + st.write("#### 📝 Qualitative Graphs") + team_manager.generate_qualitative_graphs( + team_number, + ) diff --git a/src/page_managers/match_manager.py b/src/page_managers/match_manager.py index b5a3d31..3c3a372 100644 --- a/src/page_managers/match_manager.py +++ b/src/page_managers/match_manager.py @@ -705,7 +705,7 @@ def generate_teleop_graphs( y_axis_label= ["Slow Climbs", "Fast Climbs"], y_axis_title="# of Climb Speeds", title="Climb Speeds by Team", - color_map={"Slow Climbs": GeneralConstants.LIGHT_RED, "Fast Climbs": GeneralConstants.LIGHT_GREEN} + color_map={"Slow Climbs": GeneralConstants.DARK_RED, "Fast Climbs": GeneralConstants.DARK_GREEN} ) ) diff --git a/src/page_managers/team_manager.py b/src/page_managers/team_manager.py index 1e24dfb..3d75e9c 100644 --- a/src/page_managers/team_manager.py +++ b/src/page_managers/team_manager.py @@ -9,7 +9,6 @@ box_plot, CalculatedStats, colored_metric, - colored_metric_with_two_values, Criteria, GeneralConstants, GraphType, @@ -21,7 +20,9 @@ retrieve_pit_scouting_data, retrieve_scouting_data, scouting_data_for_team, - stacked_bar_graph + stacked_bar_graph, + colored_metric_with_two_values, + populate_missing_data ) @@ -242,7 +243,7 @@ def generate_autonomous_graphs( threshold=times_left_for_percentile ) - # Speaker/amp over time graph + # Auto Speaker/amp over time graph speaker_cycles_by_match = self.calculated_stats.cycles_by_structure_per_match( team_number, Queries.AUTO_SPEAKER @@ -279,7 +280,139 @@ def generate_teleop_graphs( :param type_of_graph: The type of graph to use for the graphs on said page (cycle contribution / point contributions). 
:return: """ + times_climbed_col, times_harmonized_col = st.columns(2) + speaker_amp_col, climb_speed_col = st.columns(2) + + team_data = scouting_data_for_team(team_number) using_cycle_contributions = type_of_graph == GraphType.CYCLE_CONTRIBUTIONS - # TODO: Add teleop graphs + # Teleop Speaker/amp over time graph + with speaker_amp_col: + speaker_cycles_by_match = self.calculated_stats.cycles_by_structure_per_match( + team_number, + Queries.TELEOP_SPEAKER + ) * (1 if using_cycle_contributions else 5) + amp_cycles_by_match = self.calculated_stats.cycles_by_structure_per_match( + team_number, + Queries.TELEOP_AMP + ) * (1 if using_cycle_contributions else 2) + line_names = [ + ("# of Speaker Cycles" if using_cycle_contributions else "# of Speaker Points"), + ("# of Amp Cycles" if using_cycle_contributions else "# of Amp Points") + ] + + plotly_chart( + multi_line_graph( + range(len(speaker_cycles_by_match)), + [speaker_cycles_by_match, amp_cycles_by_match], + x_axis_label="Match Index", + y_axis_label=line_names, + y_axis_title=f"# of Teleop {'Cycles' if using_cycle_contributions else 'Points'}", + title=f"Speaker/Amp {'Cycles' if using_cycle_contributions else 'Points'} During Teleop Over Time", + color_map=dict(zip(line_names, (GeneralConstants.GOLD_GRADIENT[0], GeneralConstants.GOLD_GRADIENT[-1]))) + ) + ) + + # Metric for times climbed + with times_climbed_col: + times_climbed = self.calculated_stats.cumulative_stat( + team_number, + Queries.CLIMBED_CHAIN, + Criteria.BOOLEAN_CRITERIA + ) + times_climbed_for_percentile = self.calculated_stats.quantile_stat( + 0.5, + lambda self, team: self.cumulative_stat(team, Queries.CLIMBED_CHAIN, Criteria.BOOLEAN_CRITERIA) + ) + + colored_metric( + "# of Times Climbed", + times_climbed, + threshold=times_climbed_for_percentile + ) + + # Metric for harmonized + with times_harmonized_col: + times_harmonized = self.calculated_stats.cumulative_stat( + team_number, + Queries.HARMONIZED_ON_CHAIN, + Criteria.BOOLEAN_CRITERIA + ) + times_harmonized_for_percentile = self.calculated_stats.quantile_stat( + 0.5, + lambda self, team: self.cumulative_stat(team, Queries.HARMONIZED_ON_CHAIN, Criteria.BOOLEAN_CRITERIA) + ) + + colored_metric( + "# of Times Harmonized", + times_harmonized, + threshold=times_harmonized_for_percentile + ) + + # Climb speed over time graph + with climb_speed_col: + climb_speed_by_match = self.calculated_stats.stat_per_match( + team_number, + Queries.CLIMB_SPEED + ) + + plotly_chart( + line_graph( + range(len(climb_speed_by_match)), + climb_speed_by_match, + x_axis_label="Match Index", + y_axis_label="Climb Speed", + title=f"Climb Speed Over Time", + ) + ) + + def generate_qualitative_graphs( + self, + team_number: int, + ) -> None: + """Generates the teleop graphs for the `Team` page. + + :param team_number: The team to generate the graphs for. 
+ :return: + """ + driver_rating_col, defense_skill_col, disables_col = st.columns(3) + + with driver_rating_col: + driver_rating_by_match = self.calculated_stats.stat_per_match(team_number, Queries.DRIVER_RATING) + + plotly_chart( + line_graph( + range(len(driver_rating_by_match)), + driver_rating_by_match, + x_axis_label="Match Key", + y_axis_label="Driver Rating (1-5)", + title="Driver Rating Over Time", + ) + ) + + with defense_skill_col: + defense_skill_by_match = self.calculated_stats.stat_per_match(team_number, Queries.DEFENSE_SKILL) + + plotly_chart( + line_graph( + range(len(defense_skill_by_match)), + defense_skill_by_match, + x_axis_label="Match Key", + y_axis_label="Defense Skill (1-5)", + title="Defense Skill Over Time", + ) + ) + + with disables_col: + disables_by_match = self.calculated_stats.stat_per_match(team_number, Queries.DISABLE) + + plotly_chart( + line_graph( + range(len(disables_by_match)), + disables_by_match, + x_axis_label="Match Key", + y_axis_label="Disables", + title="Disables by Match", + ) + ) \ No newline at end of file diff --git a/src/requirements.txt b/src/requirements.txt index 0ef76fb..74cca03 100644 --- a/src/requirements.txt +++ b/src/requirements.txt @@ -1,6 +1,6 @@ pandas==2.0.1 plotly==5.14.1 requests==2.30.0 -streamlit==1.24.0 +streamlit==1.31.0 scipy==1.10.1 tbapy==1.3.2 diff --git a/src/utils/calculated_stats.py b/src/utils/calculated_stats.py index 96f7f65..6aaa97f 100644 --- a/src/utils/calculated_stats.py +++ b/src/utils/calculated_stats.py @@ -1,4 +1,6 @@ """File that contains the class which calculates statistics for a team/event/for other purposes.""" +from __future__ import annotations + from functools import reduce from typing import Callable @@ -211,6 +213,17 @@ def average_driver_rating(self, team_number: int) -> float: return scouting_data_for_team(team_number, self.data)[Queries.DRIVER_RATING].apply( lambda driver_rating: Criteria.DRIVER_RATING_CRITERIA.get(driver_rating, float("nan")) ).mean() + + def driver_by_match(self, team_number: int) -> float: + """Returns a series of data representing the team's defense rating + + :param team_number: The team to find defense data for. + :return: A series with the teams defense data. + """ + + return scouting_data_for_team(team_number, self.data)[Queries.DRIVER_RATING].apply( + lambda driver_rating: Criteria.BASIC_RATING_CRITERIA.get(driver_rating, float("nan")) + ) def average_defense_time(self, team_number: int) -> float: """Returns the average defense time of a team @@ -232,6 +245,7 @@ def average_defense_skill(self, team_number: int) -> float: lambda defense_skill: Criteria.BASIC_RATING_CRITERIA.get(defense_skill, float("nan")) ).mean() + def average_counter_defense_skill(self, team_number: int) -> float: """Returns the average counter defense skill (ability to swerve past defense) of a team. @@ -241,14 +255,6 @@ def average_counter_defense_skill(self, team_number: int) -> float: return scouting_data_for_team(team_number, self.data)[Queries.COUNTER_DEFENSE_SKIll].apply( lambda counter_defense_skill: Criteria.BASIC_RATING_CRITERIA.get(counter_defense_skill, float("nan")) ).mean() - - def disables_by_match(self, team_number: int) -> float: - """Returns a series of data representing the team's disables - - :param team_number: The team to find disable data for. - :return: A series with the teams disable data. 
- """ - return scouting_data_for_team(team_number, self.data)[Queries.DISABLE] def drivetrain_width_by_team(self, team_number: int) -> float: """Returns a float representing the teams drivetrain width diff --git a/src/utils/functions.py b/src/utils/functions.py index b0023d9..e2fbfc3 100644 --- a/src/utils/functions.py +++ b/src/utils/functions.py @@ -112,7 +112,7 @@ def scouting_data_for_team(team_number: int, scouting_data: DataFrame | None = N return scouting_data[ scouting_data["TeamNumber"] == team_number - ] + ] def retrieve_team_list() -> list: @@ -129,4 +129,3 @@ def retrieve_team_list() -> list: scouting_data["TeamNumber"] ) ) - diff --git a/src/utils/graphing.py b/src/utils/graphing.py index 917063f..2c9a684 100644 --- a/src/utils/graphing.py +++ b/src/utils/graphing.py @@ -1,4 +1,6 @@ """Defines graphing functions that are later used in FalconVis that wrap around Plotly.""" +from __future__ import annotations + import numpy as np import plotly.express as px import streamlit as st From 74d36884d7036d2741f514ac301b2de01b0e0f2c Mon Sep 17 00:00:00 2001 From: Shom770 Date: Wed, 14 Feb 2024 13:21:28 -0500 Subject: [PATCH 17/19] prevent int from getting converted --- src/page_managers/picklist_manager.py | 147 +++++++++++++++++++++++++- src/utils/calculated_stats.py | 20 +++- src/utils/functions.py | 12 +++ 3 files changed, 171 insertions(+), 8 deletions(-) diff --git a/src/page_managers/picklist_manager.py b/src/page_managers/picklist_manager.py index 0c6ecd1..629f084 100644 --- a/src/page_managers/picklist_manager.py +++ b/src/page_managers/picklist_manager.py @@ -1,13 +1,16 @@ """Creates the `PicklistManager` class used to set up the Picklist page and its table.""" +import os from functools import partial import streamlit as st from dotenv import load_dotenv +from notion_client import Client +from notion_client.helpers import get_id from pandas import DataFrame from .page_manager import PageManager -from utils import CalculatedStats, Queries, retrieve_scouting_data, retrieve_team_list +from utils import CalculatedStats, Criteria, EventSpecificConstants, Queries, retrieve_scouting_data, retrieve_team_list load_dotenv() @@ -32,8 +35,39 @@ def __init__(self): "Average Teleop Cycles": partial( self.calculated_stats.average_cycles, mode=Queries.TELEOP - ) - } # TODO: Add more stats here later + ), + "Average Speaker Cycles": partial( + self.calculated_stats.average_cycles_for_structure, + structure=(Queries.AUTO_SPEAKER, Queries.TELEOP_SPEAKER) + ), + "Average Amp Cycles": partial( + self.calculated_stats.average_cycles_for_structure, + structure=(Queries.AUTO_AMP, Queries.TELEOP_AMP) + ), + "Average Trap Cycles": partial( + self.calculated_stats.average_cycles_for_structure, + structure=Queries.TELEOP_TRAP + ), + "# of Times Climbed": partial( + self.calculated_stats.cumulative_stat, + stat=Queries.CLIMBED_CHAIN, + criteria=Criteria.BOOLEAN_CRITERIA + ), + "# of Times Harmonized": partial( + self.calculated_stats.cumulative_stat, + stat=Queries.HARMONIZED_ON_CHAIN, + criteria=Criteria.BOOLEAN_CRITERIA + ), + "# of Disables": partial( + self.calculated_stats.cumulative_stat, + stat=Queries.DISABLE, + criteria=Criteria.BOOLEAN_CRITERIA + ), + "Average Driver Rating": self.calculated_stats.average_driver_rating, + "Average Defense Skill": self.calculated_stats.average_defense_skill, + "Average Defense Time": self.calculated_stats.average_defense_time, + "Average Counter Defense Skill": self.calculated_stats.average_counter_defense_skill + } def generate_input_section(self) -> list[list, 
list]: """Creates the input section for the `Picklist` page. @@ -63,3 +97,110 @@ def generate_picklist(self, stats_requested: list[str]) -> DataFrame: for team in self.teams ] return DataFrame.from_dict(requested_picklist) + + def write_to_notion(self, dataframe: DataFrame) -> None: + """Writes to a Notion picklist entered by the user in the constants file. + + :param dataframe: The dataframe containing all the statistics of each team. + :return: + """ + # Generate Notion Database first + properties = { + "Team Name": {"title": {}} + } | { + column: {"number": {}} for column in dataframe.columns if column != "Team Number" + } + icon = {"type": "emoji", "emoji": "🗒️"} + self.client.databases.update( + database_id=(db_id := get_id(EventSpecificConstants.PICKLIST_URL)), properties=properties, icon=icon + ) + + # Retrieve poroperties of database + db_properties = self.client.databases.query(database_id=db_id)["results"][0]["properties"] + ids = {property_name: property_values["id"] for property_name, property_values in db_properties.items()} + + # Find percentiles across all teams + percentile_75 = self.calculated_stats.quantile_stat( + 0.75, + lambda self_, team: self_.average_cycles(team) + ) + percentile_50 = self.calculated_stats.quantile_stat( + 0.5, + lambda self_, team: self_.average_cycles(team) + ) + percentile_25 = self.calculated_stats.quantile_stat( + 0.25, + lambda self_, team: self_.average_cycles(team) + ) + + for _, row in dataframe.iterrows(): + team_name = row["Team Number"] + query_page = self.client.databases.query( + database_id=db_id, + filter={ + "property": "Team Name", + "title": { + "contains": team_name, + }, + } + ) + # Based off of the percentile between all their stats + team_cycles = self.calculated_stats.average_cycles(int(team_name.split()[1])) + + if team_cycles > percentile_75: + emoji = "🔵" + elif percentile_50 <= team_cycles < percentile_75: + emoji = "🟢" + elif percentile_25 <= team_cycles < percentile_50: + emoji = "🟠" + else: + emoji = "🔴" + + # No page created yet. 
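# At this point `query_page` holds any existing rows of the Notion picklist database whose
# "Team Name" title matches this team; an empty result means the team has no row yet, so the
# branch below creates a page, and otherwise the existing page is updated in place. A
# stripped-down sketch of that create-or-update decision (function name and property payload
# are placeholders, not FalconVis code):
from notion_client import Client

def upsert_team_row(client: Client, db_id: str, team_name: str, properties: dict) -> None:
    existing = client.databases.query(
        database_id=db_id,
        filter={"property": "Team Name", "title": {"contains": team_name}},
    )["results"]
    if not existing:
        client.pages.create(parent={"database_id": db_id}, properties=properties)
    else:
        client.pages.update(page_id=existing[0]["id"], properties=properties)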
+ if not query_page["results"]: + self.client.pages.create( + database_id=db_id, + icon={"type": "emoji", "emoji": emoji}, + parent={"type": "database_id", "database_id": db_id}, + properties={ + column: { + "id": ids[column], + "number": dataframe[dataframe["Team Number"] == team_name][column].iloc[0] + } for column in dataframe.columns if column != "Team Number" + } | { + "Team Name": {"id": "title", "title": [{"text": {"content": team_name}}]}, + }, + children=[ + { + "object": "block", + "type": "embed", + "embed": { + "url": f"https://falconvis-{EventSpecificConstants.EVENT_CODE[-3:]}.streamlit.app?team_number=4099" + } + } + ] + ) + # Page already created + else: + self.client.pages.update( + page_id=query_page["results"][0]["id"], + icon={"type": "emoji", "emoji": emoji}, + parent={"type": "database_id", "database_id": db_id}, + properties={ + column: { + "id": ids[column], + "number": dataframe[dataframe["Team Number"] == team_name][column].iloc[0] + } for column in dataframe.columns if column != "Team Number" + } | { + "Team Name": {"id": "title", "title": [{"text": {"content": team_name}}]}, + }, + children=[ + { + "object": "block", + "type": "embed", + "embed": { + "url": f"https://falconvis-{EventSpecificConstants.EVENT_CODE[-3:]}.streamlit.app?team_number=4099" + } + } + ] + ) diff --git a/src/utils/calculated_stats.py b/src/utils/calculated_stats.py index 96f7f65..fa95abe 100644 --- a/src/utils/calculated_stats.py +++ b/src/utils/calculated_stats.py @@ -6,8 +6,8 @@ from numpy import percentile from pandas import DataFrame, Series, isna -from .constants import Criteria, GeneralConstants, Queries -from .functions import scouting_data_for_team, retrieve_team_list, retrieve_pit_scouting_data +from .constants import Criteria, Queries +from .functions import _convert_to_float_from_numpy_type, scouting_data_for_team, retrieve_team_list, retrieve_pit_scouting_data __all__ = ["CalculatedStats"] @@ -81,7 +81,7 @@ def points_contributed_by_match(self, team_number: int, mode: str = "") -> Serie ) # Cycle calculation methods - def average_cycles(self, team_number: int, mode: str) -> float: + def average_cycles(self, team_number: int, mode: str = None) -> float: """Calculates the average cycles for a team in either autonomous or teleop (wrapper around `cycles_by_match`). The following custom graphs are supported with this function: @@ -91,7 +91,10 @@ def average_cycles(self, team_number: int, mode: str) -> float: :param mode: The mode to calculate said cycles for (Auto/Teleop) :return: A float representing the average cycles for said team in the mode specified. """ - return self.cycles_by_match(team_number, mode).mean() + if mode: + return self.cycles_by_match(team_number, mode).mean() + else: + return (self.cycles_by_match(team_number, Queries.AUTO) + self.cycles_by_match(team_number, Queries.TELEOP)).mean() def average_cycles_for_structure(self, team_number: int, structure: str) -> float: """Calculates the average cycles for a team for a structure (wrapper around `cycles_by_match`). @@ -168,6 +171,7 @@ def potential_amplification_periods_by_match(self, team_number: int) -> Series: return self.cycles_by_structure_per_match(team_number, (Queries.AUTO_AMP, Queries.TELEOP_AMP)) // 2 # Alliance-wide methods + @_convert_to_float_from_numpy_type def average_coop_bonus_rate(self, team_number: int) -> float: """Returns the average rate (%) that the coopertition bonus is reached by an alliance (average method). 
(ignore) @@ -202,6 +206,7 @@ def reaches_coop_bonus_by_match(self, team_number: int) -> Series: return auto_amp_sufficient | teleop_amp_sufficient # Rating methods + @_convert_to_float_from_numpy_type def average_driver_rating(self, team_number: int) -> float: """Returns the average driver rating of a team. @@ -211,7 +216,8 @@ def average_driver_rating(self, team_number: int) -> float: return scouting_data_for_team(team_number, self.data)[Queries.DRIVER_RATING].apply( lambda driver_rating: Criteria.DRIVER_RATING_CRITERIA.get(driver_rating, float("nan")) ).mean() - + + @_convert_to_float_from_numpy_type def average_defense_time(self, team_number: int) -> float: """Returns the average defense time of a team @@ -222,6 +228,7 @@ def average_defense_time(self, team_number: int) -> float: lambda defense_time: Criteria.DEFENSE_TIME_CRITERIA.get(defense_time, float("nan")) ).mean() + @_convert_to_float_from_numpy_type def average_defense_skill(self, team_number: int) -> float: """Returns the average defense skill of a team. @@ -232,6 +239,7 @@ def average_defense_skill(self, team_number: int) -> float: lambda defense_skill: Criteria.BASIC_RATING_CRITERIA.get(defense_skill, float("nan")) ).mean() + @_convert_to_float_from_numpy_type def average_counter_defense_skill(self, team_number: int) -> float: """Returns the average counter defense skill (ability to swerve past defense) of a team. @@ -275,6 +283,7 @@ def quantile_stat(self, quantile: float, predicate: Callable) -> float: return percentile(dataset, quantile * 100) # General methods + @_convert_to_float_from_numpy_type def average_stat(self, team_number: int, stat: str, criteria: dict | None = None) -> float: """Calculates the average statistic for a team (wrapper around `stat_per_match`). @@ -285,6 +294,7 @@ def average_stat(self, team_number: int, stat: str, criteria: dict | None = None """ return self.stat_per_match(team_number, stat, criteria).mean() + @_convert_to_float_from_numpy_type def cumulative_stat(self, team_number: int, stat: str, criteria: dict | None = None) -> int: """Calculates a cumulative stat for a team (wrapper around `stat_per_match`). diff --git a/src/utils/functions.py b/src/utils/functions.py index b0023d9..7585743 100644 --- a/src/utils/functions.py +++ b/src/utils/functions.py @@ -130,3 +130,15 @@ def retrieve_team_list() -> list: ) ) + +def _convert_to_float_from_numpy_type(function): + """ + Helper decorator used in Calculated Stats to convert numpy native types to Python native types. + + :param function: The function "decorated". + :return: A wrapper function. 
+ """ + def wrapper(*args, **kwargs) -> float: + return float(function(*args, **kwargs)) # Converts numpy dtype to native python type + + return wrapper \ No newline at end of file From e79b888b642d6a46a02df1bae9953f607f201ab3 Mon Sep 17 00:00:00 2001 From: Shom770 Date: Wed, 14 Feb 2024 14:45:45 -0500 Subject: [PATCH 18/19] falconviz finished --- src/page_managers/match_manager.py | 36 +++- src/page_managers/team_manager.py | 281 +++++++++++++++++++---------- src/utils/calculated_stats.py | 12 +- src/utils/constants.py | 14 ++ src/utils/functions.py | 6 +- src/utils/graphing.py | 14 +- 6 files changed, 244 insertions(+), 119 deletions(-) diff --git a/src/page_managers/match_manager.py b/src/page_managers/match_manager.py index 3c3a372..97155c5 100644 --- a/src/page_managers/match_manager.py +++ b/src/page_managers/match_manager.py @@ -617,6 +617,12 @@ def generate_teleop_graphs( speaker_cycles_over_time_col, amp_periods_over_time_col = st.columns(2, gap="large") climb_breakdown_by_team_col, climb_speed_by_team = st.columns(2, gap="large") + short_gradient = [ + GeneralConstants.LIGHT_RED, + GeneralConstants.RED_TO_GREEN_GRADIENT[2], + GeneralConstants.LIGHT_GREEN + ] + # Display the teleop speaker cycles of each team over time with speaker_cycles_over_time_col: cycles_by_team = [ @@ -626,6 +632,11 @@ def generate_teleop_graphs( ) for team in team_numbers ] + best_teams = sorted(zip(team_numbers, cycles_by_team), key=lambda pair: pair[1].mean()) + color_map = { + pair[0]: color + for pair, color in zip(best_teams, short_gradient) + } plotly_chart( multi_line_graph( @@ -642,7 +653,7 @@ def generate_teleop_graphs( if display_cycle_contributions else "Points Contributed in the Speaker Over Time" ), - color_map=dict(zip(team_numbers, color_gradient)) + color_map=color_map ) ) @@ -652,6 +663,11 @@ def generate_teleop_graphs( self.calculated_stats.potential_amplification_periods_by_match(team) for team in team_numbers ] + best_teams = sorted(zip(team_numbers, amp_periods_by_team), key=lambda pair: pair[1].mean()) + color_map = { + pair[0]: color + for pair, color in zip(best_teams, short_gradient) + } plotly_chart( multi_line_graph( @@ -660,18 +676,18 @@ def generate_teleop_graphs( y_axis_label=team_numbers, y_axis_title="# of Potential Amplification Periods", title="Potential Amplification Periods Produced by Alliance", - color_map=dict(zip(team_numbers, color_gradient)) + color_map=color_map ) ) with climb_breakdown_by_team_col: - normal_climbs_by_team = [ + harmonized_climbs_by_team = [ team_data[Queries.HARMONIZED_ON_CHAIN].sum() for team_data in teams_data ] - harmonized_climbs_by_team = [ - team_data[Queries.CLIMBED_CHAIN].sum() - harmonized_climbs #This works but it shouldn't I think we have harmonized climbs and normal climbs reversed - for team_data, harmonized_climbs in zip(teams_data, normal_climbs_by_team) + normal_climbs_by_team = [ + team_data[Queries.CLIMBED_CHAIN].sum() - harmonized_climbs + for team_data, harmonized_climbs in zip(teams_data, harmonized_climbs_by_team) ] plotly_chart( @@ -682,7 +698,7 @@ def generate_teleop_graphs( y_axis_label= ["Normal Climbs", "Harmonized Climbs"], y_axis_title="# of Climb Types", title="Climbs by Team", - color_map={"Normal Climbs": color_gradient[1], "Harmonized Climbs": color_gradient[2]} + color_map={"Normal Climbs": color_gradient[0], "Harmonized Climbs": color_gradient[1]} ) ) @@ -702,10 +718,10 @@ def generate_teleop_graphs( team_numbers, [slow_climbs, fast_climbs], x_axis_label="Teams", - y_axis_label= ["Slow Climbs", "Fast Climbs"], + 
y_axis_label=["Slow Climbs", "Fast Climbs"], y_axis_title="# of Climb Speeds", title="Climb Speeds by Team", - color_map={"Slow Climbs": GeneralConstants.DARK_RED, "Fast Climbs": GeneralConstants.DARK_GREEN} + color_map={"Slow Climbs": GeneralConstants.LIGHT_RED, "Fast Climbs": GeneralConstants.LIGHT_GREEN} ) ) @@ -758,7 +774,7 @@ def generate_qualitative_graphs( with disables_by_team_col: disables_by_team = [ - self.calculated_stats.disables_by_match(team).sum() + self.calculated_stats.cumulative_stat(team, Queries.DISABLE, Criteria.BOOLEAN_CRITERIA) for team in team_numbers ] diff --git a/src/page_managers/team_manager.py b/src/page_managers/team_manager.py index 3d75e9c..0380aec 100644 --- a/src/page_managers/team_manager.py +++ b/src/page_managers/team_manager.py @@ -1,6 +1,9 @@ """Creates the `TeamManager` class used to set up the Teams page and its graphs.""" +import re import streamlit as st +from annotated_text import annotated_text +from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer from .contains_metrics import ContainsMetrics from .page_manager import PageManager @@ -55,7 +58,7 @@ def generate_metrics(self, team_number: int) -> None: :param team_number: The team number to calculate the metrics for. """ points_contributed_col, drivetrain_col, auto_cycle_col, teleop_cycle_col = st.columns(4) - iqr_col, trap_ability_col, times_climbed_col, harmonize_ability_col = st.columns(4) + iqr_col, trap_ability_col, climb_breakdown_col, disables_col = st.columns(4) # Metric for avg. points contributed with points_contributed_col: @@ -178,8 +181,8 @@ def generate_metrics(self, team_number: int) -> None: value_formatter=lambda value: "Yes" if value > 0 else "No" ) - # Metric for total times climbed - with times_climbed_col: + # Metric for total times climbed and total harmonizes + with climb_breakdown_col: times_climbed = self.calculated_stats.cumulative_stat( team_number, Queries.CLIMBED_CHAIN, @@ -189,14 +192,7 @@ def generate_metrics(self, team_number: int) -> None: 0.5, lambda self, team: self.cumulative_stat(team, Queries.CLIMBED_CHAIN, Criteria.BOOLEAN_CRITERIA) ) - colored_metric( - "# of Times Climbed", - times_climbed, - threshold=times_climbed_for_percentile - ) - # Metric for ability to harmonize - with harmonize_ability_col: times_harmonized = self.calculated_stats.cumulative_stat( team_number, Queries.HARMONIZED_ON_CHAIN, @@ -207,10 +203,32 @@ def generate_metrics(self, team_number: int) -> None: lambda self, team: self.cumulative_stat(team, Queries.HARMONIZED_ON_CHAIN, Criteria.BOOLEAN_CRITERIA) ) - colored_metric( - "# of Times Harmonized", + colored_metric_with_two_values( + "Climb Breakdown", + "# of Times Climbed/Harmonized", + times_climbed, times_harmonized, - threshold=times_harmonized_for_percentile + first_threshold=times_climbed_for_percentile, + second_threshold=times_harmonized_for_percentile + ) + + # Metric for number of disables + with disables_col: + times_disabled = self.calculated_stats.cumulative_stat( + team_number, + Queries.DISABLE, + Criteria.BOOLEAN_CRITERIA + ) + times_disabled_for_percentile = self.calculated_stats.quantile_stat( + 0.5, + lambda self, team: self.cumulative_stat(team, Queries.DISABLE, Criteria.BOOLEAN_CRITERIA) + ) + + colored_metric( + "# of Times Disabled", + times_disabled, + threshold=times_disabled_for_percentile, + invert_threshold=True ) def generate_autonomous_graphs( @@ -314,105 +332,180 @@ def generate_teleop_graphs( ) ) - # Metric for times climbed - with times_climbed_col: - times_climbed = 
self.calculated_stats.cumulative_stat( - team_number, - Queries.CLIMBED_CHAIN, - Criteria.BOOLEAN_CRITERIA - ) - times_climbed_for_percentile = self.calculated_stats.quantile_stat( - 0.5, - lambda self, team: self.cumulative_stat(team, Queries.CLIMBED_CHAIN, Criteria.BOOLEAN_CRITERIA) - ) - - colored_metric( - "# of Times Climbed", - times_climbed, - threshold=times_climbed_for_percentile - ) - - # Metric for harmonized - with times_harmonized_col: - times_harmonized = self.calculated_stats.cumulative_stat( - team_number, - Queries.HARMONIZED_ON_CHAIN, - Criteria.BOOLEAN_CRITERIA - ) - times_harmonized_for_percentile = self.calculated_stats.quantile_stat( - 0.5, - lambda self, team: self.cumulative_stat(team, Queries.HARMONIZED_ON_CHAIN, Criteria.BOOLEAN_CRITERIA) - ) - - colored_metric( - "# of Times Harmonized", - times_harmonized, - threshold=times_harmonized_for_percentile - ) - # Climb speed over time graph with climb_speed_col: - climb_speed_by_match = self.calculated_stats.stat_per_match( + slow_climbs = self.calculated_stats.cumulative_stat( + team_number, + Queries.CLIMB_SPEED, + {"Slow": 1} + ) + fast_climbs = self.calculated_stats.cumulative_stat( team_number, - Queries.CLIMB_SPEED + Queries.CLIMB_SPEED, + {"Fast": 1} ) plotly_chart( - line_graph( - range(len(climb_speed_by_match)), - climb_speed_by_match, - x_axis_label="Match Index", - y_axis_label="Climb Speed", - title=f"Climb Speed Over Time", + bar_graph( + ["Slow Climbs", "Fast Climbs"], + [slow_climbs, fast_climbs], + x_axis_label="Type of Climb", + y_axis_label="# of Climbs", + title=f"Climb Speed Breakdown", + color={"Slow Climbs": GeneralConstants.LIGHT_RED, "Fast Climbs": GeneralConstants.LIGHT_GREEN}, + color_indicator="Type of Climb" ) ) - def generate_qualitative_graphs( - self, - team_number: int, - ) -> None: - """Generates the teleop graphs for the `Team` page. + def generate_qualitative_graphs(self, team_number: int) -> None: + """Generates the qualitative graphs for the `Team` page. :param team_number: The team to generate the graphs for. 
:return: """ - driver_rating_col, defense_skill_col, disables_col = st.columns(3) + # Constants used for the sentiment analysis + ml_weight = 1 + estimate_weight = 1 - with driver_rating_col: - driver_rating_by_match = self.calculated_stats.stat_per_match(team_number, Queries.DRIVER_RATING) + sentiment = SentimentIntensityAnalyzer() + positivity_scores = [] + scouting_data = scouting_data_for_team(team_number) - plotly_chart( - line_graph( - range(len(driver_rating_by_match)), - driver_rating_by_match, - x_axis_label="Match Key", - y_axis_label="Driver Rating (1-5)", - title="Driver Rating Over Time", - ) - ) + # Split into two tabs + qualitative_graphs_tab, note_scouting_analysis_tab = st.tabs( + ["📊 Qualitative Graphs", "✏️ Note Scouting Analysis"] + ) - with defense_skill_col: - defense_skill_by_match = self.calculated_stats.stat_per_match(team_number, Queries.DEFENSE_SKILL) + with qualitative_graphs_tab: + driver_rating_col, defense_skill_col, counter_defense_skill = st.columns(3) + + with driver_rating_col: + driver_rating_types = Criteria.DRIVER_RATING_CRITERIA.keys() + driver_rating_by_type = [ + self.calculated_stats.cumulative_stat(team_number, Queries.DRIVER_RATING, {driver_rating_type: 1}) + for driver_rating_type in driver_rating_types + ] + + plotly_chart( + bar_graph( + driver_rating_types, + driver_rating_by_type, + x_axis_label="Driver Rating", + y_axis_label="# of Occurrences", + title="Driver Rating Breakdown", + color=dict(zip(driver_rating_types, GeneralConstants.RED_TO_GREEN_GRADIENT[::-1])), + color_indicator="Driver Rating" + ) + ) - plotly_chart( - line_graph( - range(len(defense_skill_by_match)), - defense_skill_by_match, - x_axis_label="Match Key", - y_axis_label="Defense Skill (1-5)", - title="Defense Skill Over Time", + with defense_skill_col: + defense_skill_types = Criteria.BASIC_RATING_CRITERIA.keys() + defense_skill_by_type = [ + self.calculated_stats.cumulative_stat(team_number, Queries.DEFENSE_SKILL, {defense_skill_type: 1}) + for defense_skill_type in defense_skill_types + ] + + plotly_chart( + bar_graph( + defense_skill_types, + defense_skill_by_type, + x_axis_label="Defense Skill", + y_axis_label="# of Occurrences", + title="Defense Skill Breakdown", + color=dict(zip(defense_skill_types, GeneralConstants.RED_TO_GREEN_GRADIENT[::-1])), + color_indicator="Defense Skill" + ) ) - ) - with disables_col: - disables_by_match = self.calculated_stats.stat_per_match(team_number, Queries.DISABLE) + with counter_defense_skill: + counter_defense_skill_types = Criteria.BASIC_RATING_CRITERIA.keys() + counter_defense_skill_by_type = [ + self.calculated_stats.cumulative_stat( + team_number, + Queries.COUNTER_DEFENSE_SKIll, + {counter_defense_skill_type: 1} + ) + for counter_defense_skill_type in defense_skill_types + ] + + plotly_chart( + bar_graph( + counter_defense_skill_types, + counter_defense_skill_by_type, + x_axis_label="Counter Defense Skill", + y_axis_label="# of Occurrences", + title="Counter Defense Skill Breakdown", + color=dict(zip(counter_defense_skill_types, GeneralConstants.RED_TO_GREEN_GRADIENT[::-1])), + color_indicator="Counter Defense Skill" + ) + ) - plotly_chart( - line_graph( - range(len(disables_by_match)), - disables_by_match, - x_axis_label="Match Key", - y_axis_label="Disables", - title="Disables by Match", + with note_scouting_analysis_tab: + notes_col, metrics_col = st.columns(2, gap="medium") + notes_by_match = dict( + zip( + scouting_data[Queries.MATCH_KEY], + ( + scouting_data[Queries.AUTO_NOTES].apply(lambda note: (note + " ").lower() if 
note else "") + + scouting_data[Queries.TELEOP_NOTES].apply( + lambda note: (note + " ").lower() if note else "") + + scouting_data[Queries.ENDGAME_NOTES].apply( + lambda note: (note + " ").lower() if note else "") + + scouting_data[Queries.RATING_NOTES].apply(lambda note: note.lower()) + + ) ) - ) \ No newline at end of file + ) + + with notes_col: + st.write("##### Notes") + st.markdown("
", + unsafe_allow_html=True) # Hacky way to create a divider without whitespace + + for match_key, notes in notes_by_match.items(): + if notes.strip().replace("|", ""): + notes_col.write(f"###### {match_key}") + + text_split_by_words = re.split(r"(\s+)", notes) + annotated_words = [] + # Used to create a rough estimate of how positive the notes are. Positive terms have a weight of + # one, while negative terms have a weight of negative one and neutral terms have a weight of zero. + sentiment_scores = [] + + for word in text_split_by_words: + if not word.strip(): + annotated_words.append(word) + continue + + if any(term in word.lower() for term in GeneralConstants.POSITIVE_TERMS): + annotated_words.append((word, "", f"{GeneralConstants.LIGHT_GREEN}75")) + sentiment_scores.append(1) + elif any(term in word.lower() for term in GeneralConstants.NEGATIVE_TERMS): + annotated_words.append((word, "", f"{GeneralConstants.LIGHT_RED}75")) + sentiment_scores.append(-1) + else: + annotated_words.append(word) + + # A score given to the notes given that generates a "sentiment score", using + # the English vocabulary to determine how positive a string of text is. The downside of this method + # is that it won't catch negative terms in the context of a robot's performance, which is why + # we weight it with our own estimate of the "sentiment" score. + ml_generated_score = sentiment.polarity_scores(notes)["compound"] + sentiment_estimate = sum(sentiment_scores) / (len(sentiment_scores) or 1) + positivity_scores.append( + (ml_generated_score * ml_weight + sentiment_estimate * estimate_weight) / 2 + ) + + annotated_text( + *annotated_words + ) + st.markdown("
", unsafe_allow_html=True) + + with metrics_col: + st.write("##### Metrics") + + colored_metric( + "Positivity Score of Notes", + round(sum(positivity_scores) / (len(positivity_scores) or 1), 2), + threshold=0 + ) \ No newline at end of file diff --git a/src/utils/calculated_stats.py b/src/utils/calculated_stats.py index 50d412d..136c92e 100644 --- a/src/utils/calculated_stats.py +++ b/src/utils/calculated_stats.py @@ -219,10 +219,8 @@ def average_driver_rating(self, team_number: int) -> float: lambda driver_rating: Criteria.DRIVER_RATING_CRITERIA.get(driver_rating, float("nan")) ).mean() -<<<<<<< HEAD @_convert_to_float_from_numpy_type -======= - def driver_by_match(self, team_number: int) -> float: + def average_defense_rating(self, team_number: int) -> float: """Returns a series of data representing the team's defense rating :param team_number: The team to find defense data for. @@ -232,8 +230,8 @@ def driver_by_match(self, team_number: int) -> float: return scouting_data_for_team(team_number, self.data)[Queries.DRIVER_RATING].apply( lambda driver_rating: Criteria.BASIC_RATING_CRITERIA.get(driver_rating, float("nan")) ) - ->>>>>>> ec35a9b997de75fc5367e1b887e3aa9fc7c1f9ec + + @_convert_to_float_from_numpy_type def average_defense_time(self, team_number: int) -> float: """Returns the average defense time of a team @@ -255,11 +253,7 @@ def average_defense_skill(self, team_number: int) -> float: lambda defense_skill: Criteria.BASIC_RATING_CRITERIA.get(defense_skill, float("nan")) ).mean() -<<<<<<< HEAD @_convert_to_float_from_numpy_type -======= - ->>>>>>> ec35a9b997de75fc5367e1b887e3aa9fc7c1f9ec def average_counter_defense_skill(self, team_number: int) -> float: """Returns the average counter defense skill (ability to swerve past defense) of a team. 
diff --git a/src/utils/constants.py b/src/utils/constants.py
index c62b283..6fd2bd2 100644
--- a/src/utils/constants.py
+++ b/src/utils/constants.py
@@ -27,11 +27,15 @@ class GeneralConstants:
     BLUE_ALLIANCE_GRADIENT = ["#0b2e61", "#355687", "#7da0d1", "#a8c1e3"]
     GOLD_GRADIENT = ["#ffbd4d", "#ff9000", "#dd5f00"]
     LEVEL_GRADIENT = ["#f44a53", "#ff8800", "#f4c717"]
+    RED_TO_GREEN_GRADIENT = ["#ffb6b3", "#ffd5d4", "#e7f1e8", "#bde7bd", "#77dd76"]
+    SHORT_RED_TO_GREEN_GRADIENT = ["#ffb6b3", "#ffd5d4", "#bde7bd", "#77dd76"]
 
     # Colors
     DARK_RED = "#450a0a"
     DARK_BLUE = "#172554"
     DARK_GREEN = "#052e16"
+    LIGHT_RED = "#ff7276"
+    LIGHT_GREEN = "#00873e"
 
     # Game piece colors
     CONE_COLOR = PRIMARY_COLOR
@@ -41,6 +45,10 @@ class GeneralConstants:
     TELEOP_TOTAL_TIME = (2 * 60 + 15)
     TELEOP_MINUS_ENDGAME = TELEOP_TOTAL_TIME - 20
 
+    # Sentiment analysis terms
+    POSITIVE_TERMS = {"consistent", "speed", "good", "cycle", "fast", "score", "well", "amazing", "spectacular"}
+    NEGATIVE_TERMS = {"can't", "disable", "foul", "bad", "drop", "stuck", "poor", "missed", "slow", "only", "tip", "broke", "struggle", "bug", "prone"}
+
 
 class EventSpecificConstants:
     """Constants specific to an event."""
@@ -88,6 +96,12 @@ class Queries:
     COUNTER_DEFENSE_SKIll = "CounterDefenseSkill"
     DISABLE = "Disabled"
 
+    # Notes
+    AUTO_NOTES = "AutoNotes"
+    TELEOP_NOTES = "TeleopNotes"
+    ENDGAME_NOTES = "EndgameNotes"
+    RATING_NOTES = "RatingNotes"
+
     # Alliance constants
     RED_ALLIANCE = "red"
     BLUE_ALLIANCE = "blue"
diff --git a/src/utils/functions.py b/src/utils/functions.py
index 9dc872d..7079090 100644
--- a/src/utils/functions.py
+++ b/src/utils/functions.py
@@ -5,6 +5,7 @@
 from typing import Any
 
 import streamlit as st
+from numpy import int64
 from pandas import DataFrame, read_csv
 from requests import get
 from tbapy import TBA
@@ -138,8 +139,9 @@ def _convert_to_float_from_numpy_type(function):
 
     :param function: The function "decorated".
    :return: A wrapper function.
""" - def wrapper(*args, **kwargs) -> float: - return float(function(*args, **kwargs)) # Converts numpy dtype to native python type + def wrapper(*args, **kwargs) -> float | int: + result = function(*args, **kwargs) + return int(result) if isinstance(result, int64) else float(result) # Converts numpy dtype to native python type return wrapper diff --git a/src/utils/graphing.py b/src/utils/graphing.py index 2c9a684..5f0f8df 100644 --- a/src/utils/graphing.py +++ b/src/utils/graphing.py @@ -162,7 +162,8 @@ def bar_graph( y_axis_label: str = "", title: str = "", horizontal: bool = False, - color: str | None = None, + color: dict | str | None = None, + color_indicator: str | None = None, hover_data: list = None ) -> Figure: """ @@ -182,9 +183,14 @@ def bar_graph( title=title, orientation=("h" if horizontal else "v"), hover_data=hover_data, - color_discrete_sequence=[ - GeneralConstants.PRIMARY_COLOR if color is None else color - ] + **( + { + "color_discrete_sequence": [GeneralConstants.PRIMARY_COLOR if color is None else color] + } if isinstance(color, str) or color is None else { + "color": color_indicator, + "color_discrete_map": color + } + ) ).update_xaxes( type="category" ) From b0ea78b8c0cdac21a56ad531da581dbebadbe8aa Mon Sep 17 00:00:00 2001 From: Shom770 Date: Wed, 14 Feb 2024 14:52:04 -0500 Subject: [PATCH 19/19] finished auto picklist --- src/page_managers/match_manager.py | 2 +- src/page_managers/picklist_manager.py | 22 ++++--------------- src/pages/4_Picklist.py | 6 ----- ...{6_Custom_Graphs.py => 5_Custom_Graphs.py} | 0 4 files changed, 5 insertions(+), 25 deletions(-) rename src/pages/{6_Custom_Graphs.py => 5_Custom_Graphs.py} (100%) diff --git a/src/page_managers/match_manager.py b/src/page_managers/match_manager.py index 97155c5..5882005 100644 --- a/src/page_managers/match_manager.py +++ b/src/page_managers/match_manager.py @@ -695,7 +695,7 @@ def generate_teleop_graphs( team_numbers, [normal_climbs_by_team, harmonized_climbs_by_team], x_axis_label="Teams", - y_axis_label= ["Normal Climbs", "Harmonized Climbs"], + y_axis_label=["Normal Climbs", "Harmonized Climbs"], y_axis_title="# of Climb Types", title="Climbs by Team", color_map={"Normal Climbs": color_gradient[0], "Harmonized Climbs": color_gradient[1]} diff --git a/src/page_managers/picklist_manager.py b/src/page_managers/picklist_manager.py index 629f084..1630075 100644 --- a/src/page_managers/picklist_manager.py +++ b/src/page_managers/picklist_manager.py @@ -115,10 +115,6 @@ def write_to_notion(self, dataframe: DataFrame) -> None: database_id=(db_id := get_id(EventSpecificConstants.PICKLIST_URL)), properties=properties, icon=icon ) - # Retrieve poroperties of database - db_properties = self.client.databases.query(database_id=db_id)["results"][0]["properties"] - ids = {property_name: property_values["id"] for property_name, property_values in db_properties.items()} - # Find percentiles across all teams percentile_75 = self.calculated_stats.quantile_stat( 0.75, @@ -145,7 +141,8 @@ def write_to_notion(self, dataframe: DataFrame) -> None: } ) # Based off of the percentile between all their stats - team_cycles = self.calculated_stats.average_cycles(int(team_name.split()[1])) + team_number = int(team_name.split()[1]) + team_cycles = self.calculated_stats.average_cycles(team_number) if team_cycles > percentile_75: emoji = "🔵" @@ -164,7 +161,6 @@ def write_to_notion(self, dataframe: DataFrame) -> None: parent={"type": "database_id", "database_id": db_id}, properties={ column: { - "id": ids[column], "number": 
dataframe[dataframe["Team Number"] == team_name][column].iloc[0] } for column in dataframe.columns if column != "Team Number" } | { @@ -175,7 +171,7 @@ def write_to_notion(self, dataframe: DataFrame) -> None: "object": "block", "type": "embed", "embed": { - "url": f"https://falconvis-{EventSpecificConstants.EVENT_CODE[-3:]}.streamlit.app?team_number=4099" + "url": f"https://falconvis-{EventSpecificConstants.EVENT_CODE[-3:]}.streamlit.app?team_number={team_number}" } } ] @@ -188,19 +184,9 @@ def write_to_notion(self, dataframe: DataFrame) -> None: parent={"type": "database_id", "database_id": db_id}, properties={ column: { - "id": ids[column], "number": dataframe[dataframe["Team Number"] == team_name][column].iloc[0] } for column in dataframe.columns if column != "Team Number" } | { "Team Name": {"id": "title", "title": [{"text": {"content": team_name}}]}, - }, - children=[ - { - "object": "block", - "type": "embed", - "embed": { - "url": f"https://falconvis-{EventSpecificConstants.EVENT_CODE[-3:]}.streamlit.app?team_number=4099" - } - } - ] + } ) diff --git a/src/pages/4_Picklist.py b/src/pages/4_Picklist.py index b27830c..4978ee4 100644 --- a/src/pages/4_Picklist.py +++ b/src/pages/4_Picklist.py @@ -33,9 +33,3 @@ if st.button("📝 Write to Notion Picklist"): picklist_manager.write_to_notion(generated_picklist) - - - - - - diff --git a/src/pages/6_Custom_Graphs.py b/src/pages/5_Custom_Graphs.py similarity index 100% rename from src/pages/6_Custom_Graphs.py rename to src/pages/5_Custom_Graphs.py
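For reference, the note-scouting positivity score introduced in team_manager.py above averages VADER's compound score with a keyword-based estimate of the scouting notes. Below is a minimal standalone sketch of that weighting, assuming the vaderSentiment package is installed; the term sets, weights, and function name are illustrative stand-ins for GeneralConstants.POSITIVE_TERMS / NEGATIVE_TERMS and the ml_weight / estimate_weight values used in the page manager, not the exact values.

# Sketch of the weighted note-sentiment scoring (illustrative, not part of the patch).
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer

# Stand-in term sets; the real ones live in GeneralConstants.
POSITIVE_TERMS = {"consistent", "fast", "good", "score"}
NEGATIVE_TERMS = {"slow", "broke", "disable", "stuck"}

ML_WEIGHT = 1
ESTIMATE_WEIGHT = 1


def positivity_score(notes: str) -> float:
    """Average VADER's compound score with a keyword-based estimate, both in [-1, 1]."""
    # Machine-learned score from VADER's English lexicon.
    ml_generated_score = SentimentIntensityAnalyzer().polarity_scores(notes)["compound"]

    # Domain-specific estimate: +1 per positive keyword hit, -1 per negative hit.
    keyword_scores = []
    for word in notes.lower().split():
        if any(term in word for term in POSITIVE_TERMS):
            keyword_scores.append(1)
        elif any(term in word for term in NEGATIVE_TERMS):
            keyword_scores.append(-1)
    keyword_estimate = sum(keyword_scores) / (len(keyword_scores) or 1)

    return (ml_generated_score * ML_WEIGHT + keyword_estimate * ESTIMATE_WEIGHT) / 2


print(positivity_score("fast cycles and consistent scoring but got stuck on the chain"))

Averaging the two signals keeps the result in [-1, 1] while letting the robot-specific keyword list correct VADER on phrasing (e.g. "disabled", "tipped") that a general-purpose sentiment lexicon does not treat as negative.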