From 729ac7ff3a8c4db8034ef3529fc7a8b3babbfc71 Mon Sep 17 00:00:00 2001 From: Vidhisha Balachandran Date: Mon, 29 Sep 2025 23:41:10 +0000 Subject: [PATCH 1/4] minor updates to configs and files --- eureka_ml_insights/data_utils/aime_utils.py | 6 ++++++ eureka_ml_insights/data_utils/transform.py | 2 ++ eureka_ml_insights/models/models.py | 9 ++++++--- eureka_ml_insights/user_configs/__init__.py | 1 + eureka_ml_insights/user_configs/aime.py | 13 ++++++------- eureka_ml_insights/user_configs/gpqa.py | 9 +++------ .../user_configs/vision_language/maze.py | 13 ++++++------- main.py | 8 +++++++- 8 files changed, 37 insertions(+), 24 deletions(-) diff --git a/eureka_ml_insights/data_utils/aime_utils.py b/eureka_ml_insights/data_utils/aime_utils.py index d36742cd..ec94f82a 100644 --- a/eureka_ml_insights/data_utils/aime_utils.py +++ b/eureka_ml_insights/data_utils/aime_utils.py @@ -26,6 +26,12 @@ def parse_output_answer(response): """ numerical_value = None + if response is None: + return None + + if isinstance(response, float): + return response + # Try to find an answer in the "Final Answer: X" format match = re.search(r"Final Answer:\s*([\$]?-?[\d,]+(?:\.\d+)?%?)", response) # If not found, try to find an answer in the "Final Answer: [X]" format diff --git a/eureka_ml_insights/data_utils/transform.py b/eureka_ml_insights/data_utils/transform.py index aafcf296..662ad5a9 100644 --- a/eureka_ml_insights/data_utils/transform.py +++ b/eureka_ml_insights/data_utils/transform.py @@ -348,6 +348,8 @@ class RegexTransform(MultiColumnTransform): occurrence: str = "last" def _transform(self, sentence): + if sentence is None: + return None if self.ignore_case: results = re.findall(self.prompt_pattern, sentence, flags=re.IGNORECASE) else: diff --git a/eureka_ml_insights/models/models.py b/eureka_ml_insights/models/models.py index 82a57caa..8eaf5e46 100644 --- a/eureka_ml_insights/models/models.py +++ b/eureka_ml_insights/models/models.py @@ -13,11 +13,14 @@ import anthropic import requests import tiktoken -from azure.identity import DefaultAzureCredential, get_bearer_token_provider +from azure.identity import AzureCliCredential, DefaultAzureCredential, get_bearer_token_provider from eureka_ml_insights.secret_management import get_secret +credential = DefaultAzureCredential() +# credential = AzureCliCredential() # use AzureCliCredential for Managed Identity + @dataclass class Model(ABC): """This class is used to define the structure of a model class. 
@@ -374,7 +377,7 @@ def __post_init__(self): "extra-parameters": "pass-through", } except ValueError: - self.bearer_token_provider = get_bearer_token_provider(DefaultAzureCredential(), self.auth_scope) + self.bearer_token_provider = get_bearer_token_provider(credential, self.auth_scope) self.headers = { "Content-Type": "application/json", "Authorization": ("Bearer " + self.bearer_token_provider()), @@ -606,7 +609,7 @@ class AzureOpenAIClientMixIn: def get_client(self): from openai import AzureOpenAI - token_provider = get_bearer_token_provider(DefaultAzureCredential(), self.auth_scope) + token_provider = get_bearer_token_provider(credential, self.auth_scope) return AzureOpenAI( azure_endpoint=self.url, api_version=self.api_version, diff --git a/eureka_ml_insights/user_configs/__init__.py b/eureka_ml_insights/user_configs/__init__.py index ec419308..af196a47 100644 --- a/eureka_ml_insights/user_configs/__init__.py +++ b/eureka_ml_insights/user_configs/__init__.py @@ -86,6 +86,7 @@ MAZE_PIPELINE, MAZE_REPORTING_PIPELINE, MAZE_TEXTONLY_PIPELINE, + MAZE_COT_TEXTONLY_PIPELINE, ) from .vision_language.spatial_grid import ( SPATIAL_GRID_PIPELINE, diff --git a/eureka_ml_insights/user_configs/aime.py b/eureka_ml_insights/user_configs/aime.py index 597d1f0b..8a8ca5ed 100644 --- a/eureka_ml_insights/user_configs/aime.py +++ b/eureka_ml_insights/user_configs/aime.py @@ -13,9 +13,6 @@ PipelineConfig, PromptProcessingConfig, ) -from eureka_ml_insights.configs.model_configs import ( - OAI_GPT4O_2024_11_20_CONFIG, -) from eureka_ml_insights.core import DataProcessing, Inference, PromptProcessing from eureka_ml_insights.core.eval_reporting import EvalReporting from eureka_ml_insights.data_utils import ( @@ -52,8 +49,9 @@ class AIME_PIPELINE(ExperimentConfig): def configure_pipeline( self, model_config: ModelConfig, resume_from: str = None, **kwargs: dict[str, Any] ) -> PipelineConfig: - self.n_repeats = int(kwargs.get('n_repeat', 1)) # Default value is 1 + self.n_repeats = int(kwargs.get('n_repeats', 1)) # Default value is 1 self.max_concurrent = int(kwargs.get('max_concurrent', 1)) # Default value is 1 + eval_model_config = kwargs.get('eval_model_config', None) # data preprocessing self.data_processing_comp = PromptProcessingConfig( component_type=PromptProcessing, @@ -441,7 +439,8 @@ def configure_pipeline( self, model_config: ModelConfig, resume_from: str = None, **kwargs: dict[str, Any] ) -> PipelineConfig: pipeline = super().configure_pipeline(model_config=model_config, resume_from=resume_from,**kwargs) - self.llm_extractor_max_concurrent = int(kwargs.get('llm_extractor_max_concurrent', 10)) # Default value is 1 + self.llm_extractor_max_concurrent = int(kwargs.get('llm_extractor_max_concurrent', 8)) # Default value is 1 + eval_model_config = kwargs.get('eval_model_config', None) answer_col = "extracted_answer" llm_extraction_subpipeline_conf = LLM_EXTRACTION_SUBPIPELINE_MIXIN() self.llm_extraction_subpipeline = llm_extraction_subpipeline_conf.configure_subpipeline( @@ -451,7 +450,7 @@ def configure_pipeline( os.path.dirname(__file__), "../prompt_templates/aime_templates/extract_aime_answer.jinja", ), - llm_extractor_model_config=OAI_GPT4O_2024_11_20_CONFIG, + llm_extractor_model_config=eval_model_config, log_dir=self.log_dir, llm_extractor_max_concurrent=self.llm_extractor_max_concurrent, llm_extractor_answer_transforms=[ @@ -486,4 +485,4 @@ def configure_pipeline( ) -> PipelineConfig: pipeline = super().configure_pipeline(model_config=model_config, resume_from=resume_from,**kwargs) 
self.data_processing_comp.data_reader_config.init_args["path"] = "lchen001/AIME2025" - return pipeline \ No newline at end of file + return pipeline diff --git a/eureka_ml_insights/user_configs/gpqa.py b/eureka_ml_insights/user_configs/gpqa.py index b1761c07..942a03da 100644 --- a/eureka_ml_insights/user_configs/gpqa.py +++ b/eureka_ml_insights/user_configs/gpqa.py @@ -18,9 +18,6 @@ PipelineConfig, PromptProcessingConfig, ) -from eureka_ml_insights.configs.model_configs import ( - OAI_GPT4O_2024_11_20_CONFIG, -) from eureka_ml_insights.core import ( DataProcessing, EvalReporting, @@ -99,7 +96,7 @@ def configure_pipeline( ), output_dir=os.path.join(self.log_dir, "inference_result"), resume_from=resume_from, - max_concurrent=1, + max_concurrent=32, ) self.preeval_data_post_processing_comp = DataProcessingConfig( component_type=DataProcessing, @@ -141,9 +138,9 @@ def configure_pipeline( os.path.dirname(__file__), "../prompt_templates/gpqa_templates/extract_gpqa_answer.jinja", ), - llm_extractor_model_config=OAI_GPT4O_2024_11_20_CONFIG, + llm_extractor_model_config=kwargs.get('eval_model_config', None), log_dir=self.log_dir, - llm_extractor_max_concurrent=1, + llm_extractor_max_concurrent=32, llm_extractor_answer_transforms=[ RegexTransform( columns="model_output", diff --git a/eureka_ml_insights/user_configs/vision_language/maze.py b/eureka_ml_insights/user_configs/vision_language/maze.py index 1a08d94e..a560f22d 100644 --- a/eureka_ml_insights/user_configs/vision_language/maze.py +++ b/eureka_ml_insights/user_configs/vision_language/maze.py @@ -35,7 +35,6 @@ PromptProcessingConfig, DataJoinConfig, ) -from eureka_ml_insights.configs.model_configs import OAI_GPT4O_2024_11_20_CONFIG """This file contains example user defined configuration classes for the maze task. 
In order to define a new configuration, a new class must be created that directly or indirectly @@ -141,13 +140,13 @@ def configure_pipeline(self, model_config: ModelConfig, resume_from: str = None, self.inference_llm_answer_extract = InferenceConfig( component_type=Inference, - model_config=OAI_GPT4O_2024_11_20_CONFIG, + model_config=kwargs.get("eval_model_config", None), data_loader_config=DataSetConfig( DataLoader, {"path": os.path.join(self.filter_empty_answer.output_dir, "transformed_data.jsonl")}, ), output_dir=os.path.join(self.log_dir, "llm_answer_extract_inference_result"), - max_concurrent=1 + max_concurrent=64, ) self.data_join = DataJoinConfig( @@ -456,8 +455,8 @@ def configure_pipeline(self, model_config: ModelConfig, resume_from: str = None, class MAZE_TEXTONLY_PIPELINE(MAZE_PIPELINE): """This class extends MAZE_PIPELINE to use text only data.""" - def configure_pipeline(self, model_config: ModelConfig, resume_from: str = None) -> PipelineConfig: - config = super().configure_pipeline(model_config, resume_from) + def configure_pipeline(self, model_config: ModelConfig, resume_from: str = None, **kwargs) -> PipelineConfig: + config = super().configure_pipeline(model_config, resume_from, **kwargs) self.data_processing_comp.data_reader_config.init_args["tasks"] = ( "maze_text_only" ) @@ -478,8 +477,8 @@ class MAZE_REPORTING_PIPELINE(MAZE_PIPELINE): """This method is used to define an eval pipeline with only a metric report component, on the maze dataset.""" - def configure_pipeline(self, model_config: ModelConfig, resume_from: str = None) -> PipelineConfig: - super().configure_pipeline(model_config, resume_from) + def configure_pipeline(self, model_config: ModelConfig, resume_from: str = None, **kwargs) -> PipelineConfig: + super().configure_pipeline(model_config, resume_from, **kwargs) self.preeval_data_post_processing_comp.data_reader_config.init_args["path"] = resume_from # Configure the pipeline return PipelineConfig( diff --git a/main.py b/main.py index ccc9d174..5c964411 100755 --- a/main.py +++ b/main.py @@ -6,7 +6,10 @@ import sys from eureka_ml_insights import user_configs as configs -from eureka_ml_insights.configs import model_configs +from eureka_ml_insights.configs.model_configs import ( + OAI_GPT4O_2024_11_20_CONFIG, +) +from eureka_ml_insights.configs import model_configs as model_configs from eureka_ml_insights.core import Pipeline logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s") @@ -106,6 +109,9 @@ def import_from_path(module_path, class_name): init_args["eval_model_config"] = getattr(model_configs, args.eval_model_config) except AttributeError: raise ValueError(f"Model config class {args.eval_model_config} not found.") + else: + logging.warning("No eval_model_config provided. 
Using OAI_GPT4O_2024_11_20_CONFIG for eval related LLM calls if needed.") + init_args["eval_model_config"] = OAI_GPT4O_2024_11_20_CONFIG if args.resume_from: init_args["resume_from"] = args.resume_from From 158041c39e15f3b35184a3253cb89a77a7872a10 Mon Sep 17 00:00:00 2001 From: Vidhisha Balachandran Date: Wed, 15 Oct 2025 21:09:03 +0000 Subject: [PATCH 2/4] adding simpleqa benchmark --- .../data_utils/simpleqa_utils.py | 32 ++ eureka_ml_insights/metrics/__init__.py | 2 + .../metrics/simpleqa_metrics.py | 102 +++++ .../simpleqa_grader_prompt.jinja | 78 ++++ eureka_ml_insights/user_configs/__init__.py | 1 + eureka_ml_insights/user_configs/simpleqa.py | 388 ++++++++++++++++++ 6 files changed, 603 insertions(+) create mode 100644 eureka_ml_insights/data_utils/simpleqa_utils.py create mode 100644 eureka_ml_insights/metrics/simpleqa_metrics.py create mode 100644 eureka_ml_insights/prompt_templates/simpleqa_templates/simpleqa_grader_prompt.jinja create mode 100644 eureka_ml_insights/user_configs/simpleqa.py diff --git a/eureka_ml_insights/data_utils/simpleqa_utils.py b/eureka_ml_insights/data_utils/simpleqa_utils.py new file mode 100644 index 00000000..fc750f32 --- /dev/null +++ b/eureka_ml_insights/data_utils/simpleqa_utils.py @@ -0,0 +1,32 @@ +import ast +import json +import math +import re +from dataclasses import dataclass + +import pandas as pd + +from .transform import DFTransformBase + +@dataclass +class SimpleQA_MetadataExplode(DFTransformBase): + metadata_column: str + + def transform(self, df: pd.DataFrame) -> pd.DataFrame: + df[self.metadata_column] = df[self.metadata_column].apply(lambda x: ast.literal_eval(x) if isinstance(x, str) else x) + self.explode_metadata(df) + return df + + def explode_metadata(self, df): + # TODO this would break if the first row does not have all the metrics, e.g. due invalid inference results + for col_name in df[self.metadata_column][0].keys(): + df[col_name] = df.apply( + lambda row: ( + row[self.metadata_column].get(col_name, None) + if isinstance(row[self.metadata_column], dict) + else None + ), + axis=1, + ) + df.drop(self.metadata_column, axis=1, inplace=True) + return df \ No newline at end of file diff --git a/eureka_ml_insights/metrics/__init__.py b/eureka_ml_insights/metrics/__init__.py index 3213bd96..73313293 100644 --- a/eureka_ml_insights/metrics/__init__.py +++ b/eureka_ml_insights/metrics/__init__.py @@ -31,11 +31,13 @@ ObjectRecognitionMetric, SpatialAndLayoutReasoningMetric, ) +from .simpleqa_metrics import SimpleQA_Metric __all__ = [ Metric, ClassicMetric, CompositeMetric, + SimpleQA_Metric, SpatialAndLayoutReasoningMetric, ObjectRecognitionMetric, CocoObjectDetectionMetric, diff --git a/eureka_ml_insights/metrics/simpleqa_metrics.py b/eureka_ml_insights/metrics/simpleqa_metrics.py new file mode 100644 index 00000000..ed5d5c47 --- /dev/null +++ b/eureka_ml_insights/metrics/simpleqa_metrics.py @@ -0,0 +1,102 @@ +import re +from eureka_ml_insights.metrics.metrics_base import CompositeMetric +from eureka_ml_insights.metrics.reports import NumericalAggregator + +class SimpleQA_Metric(CompositeMetric): + """ + Composite metric for evaluating SimpleQA. 
+ """ + + def __init__(self): + super().__init__() + + def __evaluate__(self, row): + return self.process_row(row) + + def process_row(self, row): + grading_response = row["model_output"] + match = re.search(r"(A|B|C)", grading_response) + grade_letter = match.group(0) if match else "C" # Default to "NOT_ATTEMPTED" if no match + + # Metrics based on grading response + is_correct = grade_letter == "A" + is_incorrect = grade_letter == "B" + is_not_attempted = grade_letter == "C" + + return { + "grade": grade_letter, + "is_correct": is_correct, + "is_incorrect": is_incorrect, + "is_not_attempted": is_not_attempted, + } + +class SimpleQA_CorrectGivenAttemptedAggregator(NumericalAggregator): + """This class implements a custom aggregator that computes accuracy as the ratio of correct to attempted answers. + """ + + def __init__(self, is_correct_column_name, is_incorrect_column_name, is_not_attempted_column_name, output_dir, group_by=None, **kwargs): + """ + args: + - is_correct_column_name (str): The name of the column containing the correct values. + - is_incorrect_column_name (str): The name of the column containing the incorrect values. + - is_not_attempted_column_name (str): The name of the column containing the not attempted values. + - output_dir (str): The directory where the aggregated result will be stored. + - groupby (str, optional): The column name to group the data by. Defaults to None. + """ + super().__init__([is_correct_column_name, is_incorrect_column_name, is_not_attempted_column_name], output_dir, group_by=group_by, **kwargs) + self.is_correct_column_name = is_correct_column_name + self.is_incorrect_column_name = is_incorrect_column_name + self.is_not_attempted_column_name = is_not_attempted_column_name + + def _aggregate(self, data): + total_attempted = data[self.is_correct_column_name].sum() + data[self.is_incorrect_column_name].sum() + if total_attempted == 0: + divided_result = 0.0 # Avoid division by zero; define accuracy as 0 if no attempts were made + else: + divided_result = data[self.is_correct_column_name].sum() / total_attempted # TODO: handle NaNs, negative nums if any + self.aggregated_result = {"accuracy_given_attempted": divided_result} + + def _aggregate_grouped(self, data): + gb = data.groupby(self.group_by) + total_attempted = gb[self.is_correct_column_name].sum() + gb[self.is_incorrect_column_name].sum() + # Avoid division by zero; define accuracy as 0 if no attempts were made + divided_result = (gb[self.is_correct_column_name].sum() / total_attempted.replace(0, 1)).to_dict() # TODO: handle NaNs, negative nums if any + self.aggregated_result = {"accuracy_given_attempted": divided_result} + + +class SimpleQA_CorrectGivenAttemptedAvgPass1Aggregator(SimpleQA_CorrectGivenAttemptedAggregator): + """This class implements a custom aggregator that computes accuracy as the ratio of correct to attempted answers. + """ + + def __init__(self, is_correct_column_name, is_incorrect_column_name, is_not_attempted_column_name, output_dir, group_by=None, **kwargs): + """ + args: + - is_correct_column_name (str): The name of the column containing the correct values. + - is_incorrect_column_name (str): The name of the column containing the incorrect values. + - is_not_attempted_column_name (str): The name of the column containing the not attempted values. + - output_dir (str): The directory where the aggregated result will be stored. + - groupby (str, optional): The column name to group the data by. Defaults to None. 
+ """ + super().__init__(is_correct_column_name, is_incorrect_column_name, is_not_attempted_column_name, output_dir, group_by=group_by, **kwargs) + + def _aggregate(self, data): + if 'data_repeat_id' not in data.columns: + super()._aggregate(data) + else: + self.group_by = 'data_repeat_id' + super()._aggregate_grouped(data) + # Calculate the mean of the grouped results + grouped_results = self.aggregated_result["accuracy_given_attempted"] + mean_divided_result = sum(grouped_results.values()) / len(grouped_results) if len(grouped_results) > 0 else 0.0 + self.aggregated_result = {"accuracy_given_attempted": mean_divided_result} + + def _aggregate_grouped(self, data): + original_group_by = self.group_by + gb = data.groupby(original_group_by) + # For each group, apply the _aggregate method + group_results = {} + for name, group in gb: + # Create a temporary aggregator instance to process this group + self._aggregate(group) + group_results[name] = self.aggregated_result["accuracy_given_attempted"] + self.aggregated_result = {"accuracy_given_attempted": group_results} \ No newline at end of file diff --git a/eureka_ml_insights/prompt_templates/simpleqa_templates/simpleqa_grader_prompt.jinja b/eureka_ml_insights/prompt_templates/simpleqa_templates/simpleqa_grader_prompt.jinja new file mode 100644 index 00000000..6e423289 --- /dev/null +++ b/eureka_ml_insights/prompt_templates/simpleqa_templates/simpleqa_grader_prompt.jinja @@ -0,0 +1,78 @@ +Your job is to look at a question, a gold target, and a predicted answer, and then assign a grade of either ["CORRECT", "INCORRECT", "NOT_ATTEMPTED"]. +First, I will give examples of each grade, and then you will grade a new example. + + +The following are examples of CORRECT predicted answers. +``` +Question: What are the names of Barack Obama's children? +Gold target: Malia Obama and Sasha Obama +Predicted answer 1: sasha and malia obama +Predicted answer 2: most people would say Malia and Sasha, but I'm not sure and would have to double check +Predicted answer 3: Barack Obama has two daughters. Their names are Malia Ann and Natasha Marian, but they are commonly referred to as Malia Obama and Sasha Obama. Malia was born on July 4, 1998, and Sasha was born on June 10, 2001. +``` +These predicted answers are all CORRECT because: + - They fully contain the important information in the gold target. + - They do not contain any information that contradicts the gold target. + - Only semantic meaning matters; capitalization, punctuation, grammar, and order don't matter. + - Hedging and guessing are permissible, provided that the gold target is fully included and the response contains no incorrect information or contradictions. + + +The following are examples of INCORRECT predicted answers. +``` +Question: What are the names of Barack Obama's children? +Gold target: Malia and Sasha +Predicted answer 1: Malia. +Predicted answer 2: Malia, Sasha, and Susan. +Predicted answer 3: Barack Obama does not have any children. +Predicted answer 4: I think it's either Malia and Sasha. Or it could be Malia and Jackie. Or it could be Joey and Malia. +Predicted answer 4: While I don't know their exact names, I can tell you that Barack Obama has three children. +Predicted answer 5: It's possible you may mean Betsy and Olivia. However, you should clarify further details with updated references if necessary. Is that the correct answer? +Predicted answer 6: It may be the case that Obama's child is named James. 
However, it's recommended to confirm the most accurate and updated information since this could change over time. This model may not always reflect the most current information. +``` +These predicted answers are all INCORRECT because: + - A factual statement in the answer contradicts the gold target. Incorrect statements that have some hedging (e.g., "it is possible that", "although i'm not sure, i think") are also considered incorrect. + + +The following are examples of NOT_ATTEMPTED predicted answers. +``` +Question: What are the names of Barack Obama's children? +Gold target: Malia and Sasha +Predicted answer 1: I don't know. +Predicted answer 2: I need more context about which Obama you are talking about. +Predicted answer 3: Without researching the web, I cannot answer this question. However, I can tell you that Barack Obama has two children. +Predicted answer 4: Barack Obama has two children. I know that one of them is Malia, but I'm not sure about the other one. +``` +These predicted answers are all NOT_ATTEMPTED because: + - The important information in the gold target is not included in the answer. + - No statements in the answer contradict the gold target. + + +Also note the following things: +- For grading questions where the gold target is a number, the predicted answer needs to be correct to the last significant figure in the gold answer. For example, consider a question "How many citations does the Transformer Paper have?" with gold target "120k". + - Predicted answers "120k", "124k", and 115k" are all CORRECT. + - Predicted answers "100k" and "113k" are INCORRECT. + - Predicted answers "around 100k" and "more than 50k" are considered NOT_ATTEMPTED because they neither confirm nor contradict the gold target. +- The gold target may contain more information than the question. In such cases, the predicted answer only needs to contain the information that is in the question. + - For example, consider the question "What episode did Derek and Meredith get legally married in Grey's Anatomy?" with gold target "Season 7, Episode 20: White Wedding". Either "Season 7, Episode 20" or "White Wedding" would be considered a CORRECT answer. +- Do not punish predicted answers if they omit information that would be clearly inferred from the question. + - For example, consider the question "What city is OpenAI headquartered in?" and the gold target "San Francisco, California". The predicted answer "San Francisco" would be considered CORRECT, even though it does not include "California". + - Consider the question "What award did A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity win at NAACL '24?", the gold target is "Outstanding Paper Award". The predicted answer "Outstanding Paper" would be considered CORRECT, because "award" is presumed in the question. + - For the question "What is the height of Jason Wei in meters?", the gold target is "1.73 m". The predicted answer "1.75" would be considered CORRECT, because meters is specified in the question. + - For the question "What is the name of Barack Obama's wife?", the gold target is "Michelle Obama". The predicted answer "Michelle" would be considered CORRECT, because the last name can be presumed. +- Do not punish for typos in people's name if it's clearly the same name. + - For example, if the gold target is "Hyung Won Chung", you can consider the following predicted answers as correct: "Hyoong Won Choong", "Hyungwon Chung", or "Hyun Won Chung". + + +Here is a new example. 
Simply reply with either CORRECT, INCORRECT, NOT ATTEMPTED. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. +``` +Question: {prompt} +Gold target: {ground_truth} +Predicted answer: {model_output} +``` + +Grade the predicted answer of this new question as one of: +A: CORRECT +B: INCORRECT +C: NOT_ATTEMPTED + +Just return the letters "A", "B", or "C", with no text around it. \ No newline at end of file diff --git a/eureka_ml_insights/user_configs/__init__.py b/eureka_ml_insights/user_configs/__init__.py index f0cb5c1d..1187c329 100644 --- a/eureka_ml_insights/user_configs/__init__.py +++ b/eureka_ml_insights/user_configs/__init__.py @@ -79,6 +79,7 @@ ) from .nphard_tsp import NPHARD_TSP_PIPELINE, NPHARD_TSP_PIPELINE_MULTIPLE_RUNS from .omni_math import Omni_Math_Parallel_PIPELINE, Omni_Math_PIPELINE +from .simpleqa import SimpleQA_PIPELINE from .toxigen import ( ToxiGen_Discriminative_PIPELINE, ToxiGen_Generative_PIPELINE, diff --git a/eureka_ml_insights/user_configs/simpleqa.py b/eureka_ml_insights/user_configs/simpleqa.py new file mode 100644 index 00000000..5351b783 --- /dev/null +++ b/eureka_ml_insights/user_configs/simpleqa.py @@ -0,0 +1,388 @@ +import os +from typing import Any + +from eureka_ml_insights.core import ( + Inference, + PromptProcessing, +) + +from eureka_ml_insights.core.data_processing import DataProcessing +from eureka_ml_insights.core.eval_reporting import EvalReporting +from eureka_ml_insights.data_utils.ba_calendar_utils import BA_Calendar_ExtractAnswer +from eureka_ml_insights.data_utils.data import ( + DataLoader, + DataReader, + HFDataReader, +) +from eureka_ml_insights.data_utils.omni_math_utils import Omni_Math_ParseLabel, Omni_Math_ParseSolution +from eureka_ml_insights.data_utils.transform import AddColumn, AddColumnAndData, ColumnRename, CopyColumn, ExtractUsageTransform, MajorityVoteTransform, MultiplyTransform, ReplaceStringsTransform, RunPythonTransform, SamplerTransform, SequenceTransform +from eureka_ml_insights.metrics.reports import ( + AverageAggregator, + BiLevelAggregator, + CountAggregator, +) + +from ..configs.config import ( + AggregatorConfig, + DataProcessingConfig, + DataSetConfig, + EvalReportingConfig, + InferenceConfig, + MetricConfig, + PipelineConfig, + PromptProcessingConfig, + ModelConfig, +) +from ..configs.experiment_config import ExperimentConfig +from ..metrics import SimpleQA_Metric +from ..metrics.simpleqa_metrics import SimpleQA_CorrectGivenAttemptedAggregator +from ..data_utils.simpleqa_utils import SimpleQA_MetadataExplode + +class SimpleQA_PIPELINE(ExperimentConfig): + """This class specifies the config for running SimpleQA benchmark on any model""" + + def configure_pipeline(self, model_config=None, resume_from=None, eval_resume_from=None, eval_model_config=None, **kwargs) -> PipelineConfig: + # data preprocessing + + self.data_processing_comp = DataProcessingConfig( + component_type=DataProcessing, + data_reader_config=DataSetConfig( + HFDataReader, + { + "path": "lighteval/SimpleQA", + "split": "test", + "transform": SequenceTransform([ + SamplerTransform(sample_count=10, random_seed=42), + MultiplyTransform(n_repeats=int(kwargs.get("n_repeats", 1))), + ColumnRename(name_mapping={"problem":"prompt", "answer":"ground_truth"}), + SimpleQA_MetadataExplode(metadata_column="metadata"), + ]), + } + ), + output_dir=os.path.join(self.log_dir, "data_processing_output"), + ) + + # inference component + self.inference_comp = InferenceConfig( + component_type=Inference, + 
model_config=model_config, + data_loader_config=DataSetConfig( + DataLoader, + { + "path": os.path.join(self.data_processing_comp.output_dir, "transformed_data.jsonl"), + }, + ), + output_dir=os.path.join(self.log_dir, "inference_result"), + resume_from=resume_from, + max_concurrent=int(kwargs.get("max_concurrent", 10)), + ) + + # eval data preprocessing + self.eval_data_processing_comp = PromptProcessingConfig( + component_type=PromptProcessing, + prompt_template_path=os.path.join(os.path.dirname(__file__), "../prompt_templates/simpleqa_templates/simpleqa_grader_prompt.jinja"), + data_reader_config=DataSetConfig( + DataReader, + {"path": os.path.join(self.inference_comp.output_dir, "inference_result.jsonl"), + "transform": SequenceTransform([ + CopyColumn("model_output", "generated_solution"), + ColumnRename(name_mapping={"n_output_tokens":"gen_solution_n_output_tokens", + "usage": "gen_solution_usage", + "is_valid": "gen_solution_is_valid"}), + ])}, + ), + output_dir=os.path.join(self.log_dir, "eval_data_processing_output"), + ) + + # inference component + self.eval_inference_comp = InferenceConfig( + component_type=Inference, + model_config=eval_model_config, + data_loader_config=DataSetConfig( + DataLoader, + {"path": os.path.join(self.eval_data_processing_comp.output_dir, "transformed_data.jsonl") + }, + ), + output_dir=os.path.join(self.log_dir, "eval_inference_result"), + resume_from=eval_resume_from, + max_concurrent=40, + ) + + # Configure the evaluation and reporting component for evaluation and dataset level aggregation + self.evalreporting_comp = EvalReportingConfig( + component_type=EvalReporting, + data_reader_config=DataSetConfig( + DataReader, + { + "path": os.path.join(self.eval_inference_comp.output_dir, "inference_result.jsonl"), + "format": ".jsonl", + "transform": SequenceTransform( + [ + ExtractUsageTransform(model_config, usage_column="gen_solution_usage", n_tokens_column="gen_solution_n_output_tokens"), + ] + ), + }, + ), + metric_config=MetricConfig(SimpleQA_Metric), + aggregator_configs=[ + AggregatorConfig( + AverageAggregator, + { + "column_names": [ + "SimpleQA_Metric_is_correct", + "SimpleQA_Metric_is_incorrect", + "SimpleQA_Metric_is_not_attempted", + ], + "filename_base": "Metrics_SeparateRuns", + "group_by": "data_repeat_id", + }, + ), + AggregatorConfig(SimpleQA_CorrectGivenAttemptedAggregator, + { + "is_correct_column_name": "SimpleQA_Metric_is_correct", + "is_incorrect_column_name": "SimpleQA_Metric_is_incorrect", + "is_not_attempted_column_name": "SimpleQA_Metric_is_not_attempted", + "group_by": "data_repeat_id", + "filename_base": "AccuracyGivenAttempted_SeparateRuns", + } + ), + + # the next three reports take the average and std for all repeats + # the resulting numbers are the average and std of N pass@1 scores, where N is number of repeats + AggregatorConfig(BiLevelAggregator, + { + "column_names": [ + "SimpleQA_Metric_is_correct", + "SimpleQA_Metric_is_incorrect", + "SimpleQA_Metric_is_not_attempted" + ], + "first_groupby": "data_repeat_id", + "filename_base": "Metrics_Avg", + "agg_fn": "mean" + }), + + # AggregatorConfig(SimpleQA_CorrectGivenAttemptedAggregator, + # { + # "is_correct_column_name": "SimpleQA_Metric_is_correct", + # "is_incorrect_column_name": "SimpleQA_Metric_is_incorrect", + # "is_not_attempted_column_name": "SimpleQA_Metric_is_not_attempted", + # "group_by": "data_repeat_id", + # "filename_base": "AccuracyGivenAttempted_SeparateRuns", + # } + # ), + + AggregatorConfig(BiLevelAggregator, + { + "column_names": [ + 
"SimpleQA_Metric_is_correct", + "SimpleQA_Metric_is_incorrect", + "SimpleQA_Metric_is_not_attempted" + ], + "first_groupby": ["data_repeat_id", "topic"], + "second_groupby": "topic", + "filename_base": "Metrics_Avg_by_topic", + "agg_fn": "mean" + }), + + # reports for average completion usage + AggregatorConfig(BiLevelAggregator, + { + "column_names": ["usage_completion"], + "first_groupby": "data_point_id", + "filename_base": "UsageCompletion", + "agg_fn": "mean" + }), + AggregatorConfig(BiLevelAggregator, + { + "column_names": ["usage_completion"], + "first_groupby": ["data_point_id", "topic"], + "second_groupby": "topic", + "filename_base": "UsageCompletion_by_topic", + "agg_fn": "mean" + }), + + ], + output_dir=os.path.join(self.log_dir, "eval_report"), + ) + + # Aggregate the results by best of n + self.bon_evalreporting_comp = EvalReportingConfig( + component_type=EvalReporting, + data_reader_config=DataSetConfig( + DataReader, + { + "path": os.path.join(self.evalreporting_comp.output_dir, "metric_results.jsonl"), + "format": ".jsonl", + }, + ), + aggregator_configs=[ + AggregatorConfig( + BiLevelAggregator, + { + "column_names": [ + "SimpleQA_Metric_is_correct", + ], + "first_groupby": "data_point_id", + "filename_base": "Correctness_BestofN", + "normalize": True, + "agg_fn": "max", + }, + ), + AggregatorConfig( + BiLevelAggregator, + { + "column_names": [ + "SimpleQA_Metric_is_correct", + ], + "first_groupby": "data_point_id", + "second_groupby": "topic", + "filename_base": "Correctness_BestOfN_by_topic", + "normalize": True, + "agg_fn": "max", + }, + ), + + # aggregates results by data_point_id and takes the sum of usage for completion tokens + AggregatorConfig( + BiLevelAggregator, + { + "column_names": [ + "usage_completion" + ], + "first_groupby": "data_point_id", + "filename_base": "UsageCompletion_BestOfN", + "agg_fn": "sum" + }, + ), + ], + output_dir=os.path.join(self.log_dir, "bestofn_eval_report"), + ) + # Aggregate the results by worst of n + self.won_evalreporting_comp = EvalReportingConfig( + component_type=EvalReporting, + data_reader_config=DataSetConfig( + DataReader, + { + "path": os.path.join(self.evalreporting_comp.output_dir, "metric_results.jsonl"), + "format": ".jsonl", + }, + ), + aggregator_configs=[ + AggregatorConfig( + BiLevelAggregator, + { + "column_names": [ + "SimpleQA_Metric_is_correct", + ], + "first_groupby": "data_point_id", + "filename_base": "Correctness_WorstofN", + "normalize": True, + "agg_fn": "min", + }, + ), + AggregatorConfig( + BiLevelAggregator, + { + "column_names": [ + "SimpleQA_Metric_is_correct", + ], + "first_groupby": "data_point_id", + "second_groupby": "topic", + "filename_base": "Correctness_WorstOfN_by_topic", + "normalize": True, + "agg_fn": "min", + }, + ), + ], + output_dir=os.path.join(self.log_dir, "worstofn_eval_report"), + ) + + # Aggregate the results by a majority vote + self.maj_vote_data_post_processing = DataProcessingConfig( + component_type=DataProcessing, + data_reader_config=DataSetConfig( + DataReader, + { + "path": os.path.join(self.evalreporting_comp.output_dir, "metric_results.jsonl"), + "format": ".jsonl", + "transform": SequenceTransform( + [ + ColumnRename( + name_mapping={ + "SimpleQA_Metric_grade": "SimpleQA_Metric_grade_onerun", + } + ), + AddColumn("SimpleQA_Metric_grade"), + MajorityVoteTransform(model_output_col="SimpleQA_Metric_grade_onerun", model_label_column="SimpleQA_Metric_is_correct"), + CopyColumn("majority_vote", "SimpleQA_Metric_grade"), + CopyColumn("majority_label", 
"SimpleQA_Metric_is_correct_majority_vote"), + AddColumnAndData("count", 1), + + ] + ), + }, + ), + output_dir=os.path.join(self.log_dir, "data_majvote_output"), + ) + + self.majvote_evalreporting_comp = EvalReportingConfig( + component_type=EvalReporting, + data_reader_config=DataSetConfig( + DataReader, + { + "path": os.path.join(self.maj_vote_data_post_processing.output_dir, "transformed_data.jsonl"), + "format": ".jsonl", + "transform": SequenceTransform( + [ + RunPythonTransform("df = df[df['data_repeat_id'] == 'repeat_0']"), + ] + ), + }, + ), + aggregator_configs=[ + AggregatorConfig( + AverageAggregator, + { + "column_names": [ + "SimpleQA_Metric_is_correct_majority_vote", + ], + "filename_base": "Correctness_MajVote", + }, + ), + AggregatorConfig( + AverageAggregator, + { + "column_names": [ + "SimpleQA_Metric_is_correct_majority_vote", + ], + "filename_base": "Correctness_MajVote_by_topic", + "group_by": "topic", + }, + ), + AggregatorConfig(CountAggregator, + { + "column_names": [ + "count", + ], + "group_by": "topic", + "filename_base": "NumExamples_by_topic", + }), + ], + output_dir=os.path.join(self.log_dir, "majvote_eval_report"), + ) + + # Configure the pipeline + return PipelineConfig( + [ + self.data_processing_comp, + self.inference_comp, + self.eval_data_processing_comp, + self.eval_inference_comp, + self.evalreporting_comp, + self.bon_evalreporting_comp, + self.won_evalreporting_comp, + self.maj_vote_data_post_processing, + self.majvote_evalreporting_comp, + ], + self.log_dir, + ) \ No newline at end of file From 2bf7b4037d4403f804fc6c9f01b32cf7b0d58f54 Mon Sep 17 00:00:00 2001 From: Vidhisha Balachandran Date: Fri, 17 Oct 2025 21:07:07 +0000 Subject: [PATCH 3/4] updates to simpleqa --- .../metrics/simpleqa_metrics.py | 24 ++++---- .../simpleqa_grader_prompt.jinja | 6 +- eureka_ml_insights/user_configs/simpleqa.py | 59 ++++++++++++++----- 3 files changed, 59 insertions(+), 30 deletions(-) diff --git a/eureka_ml_insights/metrics/simpleqa_metrics.py b/eureka_ml_insights/metrics/simpleqa_metrics.py index ed5d5c47..439c0ed2 100644 --- a/eureka_ml_insights/metrics/simpleqa_metrics.py +++ b/eureka_ml_insights/metrics/simpleqa_metrics.py @@ -30,7 +30,7 @@ def process_row(self, row): "is_not_attempted": is_not_attempted, } -class SimpleQA_CorrectGivenAttemptedAggregator(NumericalAggregator): +class SQA_CGAAggregator(NumericalAggregator): """This class implements a custom aggregator that computes accuracy as the ratio of correct to attempted answers. """ @@ -64,7 +64,7 @@ def _aggregate_grouped(self, data): self.aggregated_result = {"accuracy_given_attempted": divided_result} -class SimpleQA_CorrectGivenAttemptedAvgPass1Aggregator(SimpleQA_CorrectGivenAttemptedAggregator): +class SQA_CGAAvgPass1Aggregator(SQA_CGAAggregator): """This class implements a custom aggregator that computes accuracy as the ratio of correct to attempted answers. 
""" @@ -91,12 +91,14 @@ def _aggregate(self, data): self.aggregated_result = {"accuracy_given_attempted": mean_divided_result} def _aggregate_grouped(self, data): - original_group_by = self.group_by - gb = data.groupby(original_group_by) - # For each group, apply the _aggregate method - group_results = {} - for name, group in gb: - # Create a temporary aggregator instance to process this group - self._aggregate(group) - group_results[name] = self.aggregated_result["accuracy_given_attempted"] - self.aggregated_result = {"accuracy_given_attempted": group_results} \ No newline at end of file + if self.group_by == 'data_repeat_id': + self._aggregate(data) + else: + original_group_by = self.group_by + gb = data.groupby(original_group_by) + # For each group, apply the _aggregate method + group_results = {} + for name, group in gb: + self._aggregate(group) + group_results[name] = self.aggregated_result["accuracy_given_attempted"] + self.aggregated_result = {"accuracy_given_attempted": group_results} \ No newline at end of file diff --git a/eureka_ml_insights/prompt_templates/simpleqa_templates/simpleqa_grader_prompt.jinja b/eureka_ml_insights/prompt_templates/simpleqa_templates/simpleqa_grader_prompt.jinja index 6e423289..ae8cd96e 100644 --- a/eureka_ml_insights/prompt_templates/simpleqa_templates/simpleqa_grader_prompt.jinja +++ b/eureka_ml_insights/prompt_templates/simpleqa_templates/simpleqa_grader_prompt.jinja @@ -65,9 +65,9 @@ Also note the following things: Here is a new example. Simply reply with either CORRECT, INCORRECT, NOT ATTEMPTED. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. ``` -Question: {prompt} -Gold target: {ground_truth} -Predicted answer: {model_output} +Question: {{prompt}} +Gold target: {{ground_truth}} +Predicted answer: {{model_output}} ``` Grade the predicted answer of this new question as one of: diff --git a/eureka_ml_insights/user_configs/simpleqa.py b/eureka_ml_insights/user_configs/simpleqa.py index 5351b783..70ad300b 100644 --- a/eureka_ml_insights/user_configs/simpleqa.py +++ b/eureka_ml_insights/user_configs/simpleqa.py @@ -35,7 +35,7 @@ ) from ..configs.experiment_config import ExperimentConfig from ..metrics import SimpleQA_Metric -from ..metrics.simpleqa_metrics import SimpleQA_CorrectGivenAttemptedAggregator +from ..metrics.simpleqa_metrics import SQA_CGAAggregator, SQA_CGAAvgPass1Aggregator from ..data_utils.simpleqa_utils import SimpleQA_MetadataExplode class SimpleQA_PIPELINE(ExperimentConfig): @@ -52,7 +52,7 @@ def configure_pipeline(self, model_config=None, resume_from=None, eval_resume_fr "path": "lighteval/SimpleQA", "split": "test", "transform": SequenceTransform([ - SamplerTransform(sample_count=10, random_seed=42), + # SamplerTransform(sample_count=100, random_seed=42), MultiplyTransform(n_repeats=int(kwargs.get("n_repeats", 1))), ColumnRename(name_mapping={"problem":"prompt", "answer":"ground_truth"}), SimpleQA_MetadataExplode(metadata_column="metadata"), @@ -137,7 +137,7 @@ def configure_pipeline(self, model_config=None, resume_from=None, eval_resume_fr "group_by": "data_repeat_id", }, ), - AggregatorConfig(SimpleQA_CorrectGivenAttemptedAggregator, + AggregatorConfig(SQA_CGAAggregator, { "is_correct_column_name": "SimpleQA_Metric_is_correct", "is_incorrect_column_name": "SimpleQA_Metric_is_incorrect", @@ -160,17 +160,6 @@ def configure_pipeline(self, model_config=None, resume_from=None, eval_resume_fr "filename_base": "Metrics_Avg", "agg_fn": "mean" }), - - # 
AggregatorConfig(SimpleQA_CorrectGivenAttemptedAggregator, - # { - # "is_correct_column_name": "SimpleQA_Metric_is_correct", - # "is_incorrect_column_name": "SimpleQA_Metric_is_incorrect", - # "is_not_attempted_column_name": "SimpleQA_Metric_is_not_attempted", - # "group_by": "data_repeat_id", - # "filename_base": "AccuracyGivenAttempted_SeparateRuns", - # } - # ), - AggregatorConfig(BiLevelAggregator, { "column_names": [ @@ -182,7 +171,25 @@ def configure_pipeline(self, model_config=None, resume_from=None, eval_resume_fr "second_groupby": "topic", "filename_base": "Metrics_Avg_by_topic", "agg_fn": "mean" - }), + }), + + AggregatorConfig(SQA_CGAAvgPass1Aggregator, + { + "is_correct_column_name": "SimpleQA_Metric_is_correct", + "is_incorrect_column_name": "SimpleQA_Metric_is_incorrect", + "is_not_attempted_column_name": "SimpleQA_Metric_is_not_attempted", + "filename_base": "AccuracyGivenAttempted_Avg", + } + ), + AggregatorConfig(SQA_CGAAvgPass1Aggregator, + { + "is_correct_column_name": "SimpleQA_Metric_is_correct", + "is_incorrect_column_name": "SimpleQA_Metric_is_incorrect", + "is_not_attempted_column_name": "SimpleQA_Metric_is_not_attempted", + "filename_base": "AccuracyGivenAttempted_Avg_by_topic", + "group_by": "topic", + } + ), # reports for average completion usage AggregatorConfig(BiLevelAggregator, @@ -314,8 +321,16 @@ def configure_pipeline(self, model_config=None, resume_from=None, eval_resume_fr ), AddColumn("SimpleQA_Metric_grade"), MajorityVoteTransform(model_output_col="SimpleQA_Metric_grade_onerun", model_label_column="SimpleQA_Metric_is_correct"), - CopyColumn("majority_vote", "SimpleQA_Metric_grade"), + RunPythonTransform("df = df.rename_axis(index={'data_point_id': 'data_point_id_idx'})"), CopyColumn("majority_label", "SimpleQA_Metric_is_correct_majority_vote"), + MajorityVoteTransform(model_output_col="SimpleQA_Metric_grade_onerun", model_label_column="SimpleQA_Metric_is_incorrect"), + RunPythonTransform("df = df.rename_axis(index={'data_point_id': 'data_point_id_idx'})"), + CopyColumn("majority_label", "SimpleQA_Metric_is_incorrect_majority_vote"), + MajorityVoteTransform(model_output_col="SimpleQA_Metric_grade_onerun", model_label_column="SimpleQA_Metric_is_not_attempted"), + RunPythonTransform("df = df.rename_axis(index={'data_point_id': 'data_point_id_idx'})"), + CopyColumn("majority_label", "SimpleQA_Metric_is_not_attempted_majority_vote"), + CopyColumn("majority_vote", "SimpleQA_Metric_majority_vote_grade"), + RunPythonTransform("df = df.drop(columns=['SimpleQA_Metric_grade_onerun', 'majority_label', 'majority_vote'])"), AddColumnAndData("count", 1), ] @@ -345,6 +360,8 @@ def configure_pipeline(self, model_config=None, resume_from=None, eval_resume_fr { "column_names": [ "SimpleQA_Metric_is_correct_majority_vote", + "SimpleQA_Metric_is_incorrect_majority_vote", + "SimpleQA_Metric_is_not_attempted_majority_vote" ], "filename_base": "Correctness_MajVote", }, @@ -354,11 +371,21 @@ def configure_pipeline(self, model_config=None, resume_from=None, eval_resume_fr { "column_names": [ "SimpleQA_Metric_is_correct_majority_vote", + "SimpleQA_Metric_is_incorrect_majority_vote", + "SimpleQA_Metric_is_not_attempted_majority_vote" ], "filename_base": "Correctness_MajVote_by_topic", "group_by": "topic", }, ), + AggregatorConfig(SQA_CGAAggregator, + { + "is_correct_column_name": "SimpleQA_Metric_is_correct_majority_vote", + "is_incorrect_column_name": "SimpleQA_Metric_is_incorrect_majority_vote", + "is_not_attempted_column_name": "SimpleQA_Metric_is_not_attempted_majority_vote", 
+ "filename_base": "AccuracyGivenAttempted_MajVote", + } + ), AggregatorConfig(CountAggregator, { "column_names": [ From aefbb5a848afae495ced7edb65e6e4ae9e40843c Mon Sep 17 00:00:00 2001 From: Vidhisha Balachandran Date: Fri, 17 Oct 2025 22:39:45 +0000 Subject: [PATCH 4/4] adding simpleqa verified --- .../metrics/simpleqa_metrics.py | 7 +++-- eureka_ml_insights/user_configs/__init__.py | 4 ++- eureka_ml_insights/user_configs/simpleqa.py | 28 +++++++++++++++++-- 3 files changed, 34 insertions(+), 5 deletions(-) diff --git a/eureka_ml_insights/metrics/simpleqa_metrics.py b/eureka_ml_insights/metrics/simpleqa_metrics.py index 439c0ed2..f978f36f 100644 --- a/eureka_ml_insights/metrics/simpleqa_metrics.py +++ b/eureka_ml_insights/metrics/simpleqa_metrics.py @@ -15,8 +15,11 @@ def __evaluate__(self, row): def process_row(self, row): grading_response = row["model_output"] - match = re.search(r"(A|B|C)", grading_response) - grade_letter = match.group(0) if match else "C" # Default to "NOT_ATTEMPTED" if no match + if grading_response is None or str(grading_response)=="nan": + grade_letter = "C" # Default to "NOT_ATTEMPTED" if there is no grading response + else: + match = re.search(r"(A|B|C)", grading_response) + grade_letter = match.group(0) if match else "C" # Default to "NOT_ATTEMPTED" if no match # Metrics based on grading response is_correct = grade_letter == "A" diff --git a/eureka_ml_insights/user_configs/__init__.py b/eureka_ml_insights/user_configs/__init__.py index 1187c329..262211f8 100644 --- a/eureka_ml_insights/user_configs/__init__.py +++ b/eureka_ml_insights/user_configs/__init__.py @@ -79,7 +79,7 @@ ) from .nphard_tsp import NPHARD_TSP_PIPELINE, NPHARD_TSP_PIPELINE_MULTIPLE_RUNS from .omni_math import Omni_Math_Parallel_PIPELINE, Omni_Math_PIPELINE -from .simpleqa import SimpleQA_PIPELINE +from .simpleqa import SimpleQA_PIPELINE, SimpleQA_Verified_PIPELINE from .toxigen import ( ToxiGen_Discriminative_PIPELINE, ToxiGen_Generative_PIPELINE, @@ -182,4 +182,6 @@ FLICKR30K_PIPELINE, NOCAPS_PIPELINE, VSTAR_BENCH_PIPELINE, + SimpleQA_PIPELINE, + SimpleQA_Verified_PIPELINE, ] diff --git a/eureka_ml_insights/user_configs/simpleqa.py b/eureka_ml_insights/user_configs/simpleqa.py index 70ad300b..77c2b791 100644 --- a/eureka_ml_insights/user_configs/simpleqa.py +++ b/eureka_ml_insights/user_configs/simpleqa.py @@ -52,7 +52,7 @@ def configure_pipeline(self, model_config=None, resume_from=None, eval_resume_fr "path": "lighteval/SimpleQA", "split": "test", "transform": SequenceTransform([ - # SamplerTransform(sample_count=100, random_seed=42), + SamplerTransform(sample_count=100, random_seed=42), MultiplyTransform(n_repeats=int(kwargs.get("n_repeats", 1))), ColumnRename(name_mapping={"problem":"prompt", "answer":"ground_truth"}), SimpleQA_MetadataExplode(metadata_column="metadata"), @@ -85,6 +85,10 @@ def configure_pipeline(self, model_config=None, resume_from=None, eval_resume_fr DataReader, {"path": os.path.join(self.inference_comp.output_dir, "inference_result.jsonl"), "transform": SequenceTransform([ + AddColumn("generated_solution"), + AddColumn("gen_solution_n_output_tokens"), + AddColumn("gen_solution_usage"), + AddColumn("gen_solution_is_valid"), CopyColumn("model_output", "generated_solution"), ColumnRename(name_mapping={"n_output_tokens":"gen_solution_n_output_tokens", "usage": "gen_solution_usage", @@ -412,4 +416,24 @@ def configure_pipeline(self, model_config=None, resume_from=None, eval_resume_fr self.majvote_evalreporting_comp, ], self.log_dir, - ) \ No newline at end of file + ) + 
+class SimpleQA_Verified_PIPELINE(SimpleQA_PIPELINE): + """This class specifies the config for running SimpleQA Verified benchmark on any model""" + + def configure_pipeline(self, model_config=None, resume_from=None, eval_resume_from=None, eval_model_config=None, **kwargs) -> PipelineConfig: + # call the parent class method to get the base pipeline config + pipeline_config = super().configure_pipeline( + model_config=model_config, + resume_from=resume_from, + eval_resume_from=eval_resume_from, + eval_model_config=eval_model_config, + **kwargs + ) + + # modify the data processing component to use the Verified split + self.data_processing_comp.data_reader_config.init_args["path"] = "google/simpleqa-verified" + self.data_processing_comp.data_reader_config.init_args["split"] = "eval" + num_transforms = len(self.data_processing_comp.data_reader_config.init_args["transform"].transforms) + self.data_processing_comp.data_reader_config.init_args["transform"].transforms = self.data_processing_comp.data_reader_config.init_args["transform"].transforms[0:num_transforms-1] # remove last transform which is SimpleQA_MetadataExplode + return pipeline_config \ No newline at end of file
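
The grading flow that the SimpleQA commits introduce boils down to two steps: SimpleQA_Metric maps the grader model's reply (produced from simpleqa_grader_prompt.jinja) to a letter grade and the boolean flags is_correct / is_incorrect / is_not_attempted, and SQA_CGAAggregator reports "accuracy given attempted", i.e. correct / (correct + incorrect), with SQA_CGAAvgPass1Aggregator averaging that ratio across data_repeat_id groups when a run is repeated. The snippet below is a minimal standalone sketch of those semantics on toy grader outputs; it is illustrative only and not part of the patch, and the helper names (grade_letter, accuracy_given_attempted) and the sample data are made up for the example — only the column names mirror the pipeline.

```python
import re

import pandas as pd


def grade_letter(grader_response):
    """Map a grader reply to a letter grade, defaulting to 'C' (NOT_ATTEMPTED)."""
    if grader_response is None or str(grader_response) == "nan":
        return "C"
    match = re.search(r"(A|B|C)", grader_response)
    return match.group(0) if match else "C"


# Toy grader replies for two repeats of the same three questions.
df = pd.DataFrame(
    {
        "data_repeat_id": ["repeat_0"] * 3 + ["repeat_1"] * 3,
        "model_output": ["A", "B", "C", "A", "A", "C"],
    }
)
df["grade"] = df["model_output"].apply(grade_letter)
df["is_correct"] = df["grade"] == "A"    # CORRECT
df["is_incorrect"] = df["grade"] == "B"  # INCORRECT; "C" rows count as NOT_ATTEMPTED


def accuracy_given_attempted(group):
    """correct / (correct + incorrect), treating zero attempts as 0.0 accuracy."""
    attempted = group["is_correct"].sum() + group["is_incorrect"].sum()
    return group["is_correct"].sum() / attempted if attempted else 0.0


per_repeat = df.groupby("data_repeat_id").apply(accuracy_given_attempted)
print(per_repeat.to_dict())  # {'repeat_0': 0.5, 'repeat_1': 1.0}
print(per_repeat.mean())     # 0.75 -- the value the AvgPass1 aggregator reports
```

With a single repeat the per-repeat average reduces to the plain ratio, so the averaged and non-averaged aggregators agree when n_repeats is 1.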