From a8d84577ad8be0f1298d85f9310c61cf088e3f64 Mon Sep 17 00:00:00 2001
From: unnir
Date: Thu, 11 Jul 2024 17:19:47 +0200
Subject: [PATCH] v0.2.0

---
 README.md               | 182 ++++++++++++---
 augini/__init__.py      |   4 +-
 augini/core.py          |  31 ++-
 directory_contents.md   | 474 ----------------------------------------
 examples/test_augini.py |  68 +++---
 pyproject.toml          |   6 +-
 requirements.txt        |   2 +-
 7 files changed, 207 insertions(+), 560 deletions(-)
 delete mode 100644 directory_contents.md

diff --git a/README.md b/README.md
index bab369a..db060a9 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,39 @@
-# Augini
+# Augini: AI-Powered Data Augmentation, Generation, Labeling, and Anonymization
+
+Augini is a versatile Python framework for AI-driven data manipulation. It uses large language models to augment, generate, label, and anonymize tabular data, creating realistic, privacy-preserving datasets.
+
+
+## Data Augmentation
+
+- Enhance existing datasets with AI-generated features
+- Add contextual information based on current data
+- Infuse domain knowledge from LLMs
+
+
+## Synthetic Data Generation
+
+- Create entirely new, realistic datasets
+- Maintain statistical properties of original data
+- Generate diverse, coherent synthetic profiles
+
+
+## Data Anonymization
+
+- Implement k-anonymity and l-diversity
+- Generate synthetic identifiers
+- Balance privacy and data utility
+
+
+
+## Use Cases
+
+- Augment ML training datasets
+- Generate privacy-safe data for sharing
+- Label data automatically using state-of-the-art AI models
+- Create synthetic data for software testing
+- Develop realistic scenarios for business planning
+- Produce diverse datasets for research and education
 
-Augini is a Python framework for generating synthetic tabular data using AI. It leverages the power of language models to create realistic, fictional data based on existing datasets.
 
 ## Installation
 
@@ -17,8 +50,13 @@ Here's a simple example of how to use Augini:
 from augini import Augini
 import pandas as pd
 
-# Initialize Augini
-augini = Augini(api_key="your_api_key", use_openrouter=True)
+api_key = "your-api-key"  # an OpenAI or an OpenRouter key
+
+# OpenAI
+augini = Augini(api_key=api_key, debug=False, use_openrouter=False, model='gpt-4-turbo')
+
+# OpenRouter
+augini = Augini(api_key=api_key, use_openrouter=True, model='meta-llama/llama-3-8b-instruct')
 
 # Create a sample DataFrame
 data = {
@@ -34,16 +72,7 @@ result_df = augini.augment_columns(df, 'NAME', 'OCCUPATION', 'FAVORITE_DRINK')
 print(result_df)
 ```
 
-## Features
-
-- Generate synthetic data based on existing datasets
-- Customizable prompts for data generation
-- Support for both OpenAI API and OpenRouter
-- Asynchronous processing for improved performance
 
-## Extending and Enriching Data
-
-Augini can be used to extend, augment, and enrich your datasets by adding synthetic features and bringing knowledge from language models to your data.
 
 ### Adding Multiple Features
 
@@ -64,16 +93,6 @@ result_df = augini.augment_single(df, 'QuirkyPet', custom_prompt=custom_prompt)
 print(result_df)
 ```
 
-### Anonymizing Data
-
-You can anonymize sensitive information in your dataset by generating synthetic data:
-
-```python
-anonymize_prompt = "Create an anonymous profile for the person based on their age and city. Respond with a JSON object with keys 'AnonymousName' and 'AnonymousEmail'."
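+
+A custom prompt can also fill several related columns in one `augment_columns` call. The sketch below is illustrative: the 'FavoriteCuisine' and 'WeekendActivity' keys are arbitrary and only need to match the column names passed in:
+
+```python
+cuisine_prompt = "Based on the person's age and place of birth, suggest a favorite cuisine and a typical weekend activity. Respond with a JSON object with keys 'FavoriteCuisine' and 'WeekendActivity'."
+result_df = augini.augment_columns(df, ['FavoriteCuisine', 'WeekendActivity'], custom_prompt=cuisine_prompt)
+print(result_df)
+```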
-result_df = augini.augment_single(df, 'AnonymousProfile', custom_prompt=anonymize_prompt)
-print(result_df)
-```
-
 ## Bringing Knowledge from LLMs
 
 Leverage the knowledge embedded in language models to enhance your datasets:
 
@@ -94,9 +113,10 @@ result_df = augini.augment_single(df, 'Recommendations', custom_prompt=recommend
 print(result_df)
 ```
 
-## Full Example
+### Anonymizing Data
+
+You can anonymize sensitive information in your dataset by generating synthetic data:
 
-Here's a full example demonstrating multiple features and custom prompts:
 
 ```python
 from augini import Augini
@@ -121,12 +141,116 @@ custom_prompt = "Based on the person's name and age, suggest a quirky pet for th
 result_df = augini.augment_single(result_df, 'QuirkyPet', custom_prompt=custom_prompt)
 
 # Anonymize data
-anonymize_prompt = "Create an anonymous profile for the person based on their age and city. Respond with a JSON object with keys 'AnonymousName' and 'AnonymousEmail'."
-result_df = augini.augment_single(result_df, 'AnonymousProfile', custom_prompt=anonymize_prompt)
+# Initialize Augini with your API key
+
+from augini import Augini
+import pandas as pd
+api_key = "your-api-key"  # an OpenAI or an OpenRouter key
+
+# OpenAI
+augini = Augini(api_key=api_key, debug=False, use_openrouter=False, model='gpt-4-turbo')
+
+# OpenRouter
+augini = Augini(api_key=api_key, use_openrouter=True, model='meta-llama/llama-3-8b-instruct')
+
+# Create a sample DataFrame with sensitive information
+data = {
+    'Name': ['Alice Johnson', 'Bob Smith', 'Charlie Davis'],
+    'Age': [28, 34, 45],
+    'City': ['New York', 'Los Angeles', 'Chicago'],
+    'Email': ['alice.johnson@example.com', 'bob.smith@example.com', 'charlie.davis@example.com'],
+    'Phone': ['123-456-7890', '987-654-3210', '555-555-5555']
+}
+df = pd.DataFrame(data)
+
+# Define a general anonymization prompt
+anonymize_prompt = (
+    "Given the information from the dataset, create an anonymized version that protects individual privacy while maintaining data utility. "
+    "Follow these guidelines:\n\n"
+    "1. K-Anonymity: Ensure that each combination of quasi-identifiers (e.g., age, city) appears at least k times in the dataset. "
+    "Use generalization or suppression techniques as needed.\n"
+    "2. L-Diversity: For sensitive attributes, ensure there are at least l well-represented values within each equivalence class.\n"
+    "3. Direct Identifiers: Replace the following with synthetic data:\n"
+    "   - Names: Generate culturally appropriate fictional names\n"
+    "   - Email addresses: Create plausible fictional email addresses\n"
+    "   - Phone numbers: Generate realistic but non-functional phone numbers\n"
+    "4. Quasi-Identifiers: Apply generalization or suppression as needed:\n"
+    "   - Age: Consider using age ranges instead of exact ages\n"
+    "   - City: Use broader geographic regions if necessary\n"
+    "5. Sensitive Attributes: Maintain the statistical distribution of sensitive data while ensuring diversity.\n"
+    "6. Data Consistency: Ensure that the anonymized data remains internally consistent and plausible.\n"
+    "7. Non-Sensitive Data: Keep unchanged unless required for k-anonymity or l-diversity.\n\n"
+    "Respond with a JSON object containing the anonymized values for all fields. "
+    "Ensure the anonymized dataset maintains utility for analysis while protecting individual privacy."
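+    # The guidelines above leave k and l unspecified, so the model applies
+    # k-anonymity and l-diversity qualitatively rather than as enforced,
+    # formal guarantees; verify strict requirements with a dedicated tool.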
+)
+
+# Use the augment_columns method to anonymize the data
+result_df = augini.augment_columns(df, ['Name_A', 'Email_A', 'Age_A', 'City_A'], custom_prompt=anonymize_prompt)
+
+# Display the resulting DataFrame
 print(result_df)
 ```
 
-## Contributing
-
-We welcome contributions to enhance Augini! Feel free to open issues and submit pull requests on our GitHub repository.
+### Automated Data Labeling
+
+Augini can be used to automatically generate labels for data, enriching datasets with semantic information. In this example, we use Augini to analyze sentences and assign each one a semantic label, a sentiment, and a topic:
+
+
+```python
+from augini import Augini
+import pandas as pd
+
+# Initialize Augini
+api_key = "your_api_key_here"
+augini = Augini(api_key=api_key, use_openrouter=True, model='openai/gpt-3.5-turbo')
+
+# Create a sample DataFrame with sentences
+data = {
+    'sentence': [
+        "The cat sat on the mat.",
+        "I love to eat pizza on Fridays.",
+        "The stock market crashed yesterday.",
+        "She sang beautifully at the concert.",
+        "The new policy will be implemented next month."
+    ]
+}
+df = pd.DataFrame(data)
+
+# Define custom prompts for labeling
+semantic_label_prompt = """
+Analyze the given sentence and provide a semantic label. Choose from the following options:
+Statement
+Opinion
+Fact
+Action
+Event
+Respond with a JSON object containing the key 'semantic_label' and its value.
+"""
+
+sentiment_prompt = """
+Determine the sentiment of the given sentence. Choose from the following options:
+Positive
+Negative
+Neutral
+Respond with a JSON object containing the key 'sentiment' and its value.
+"""
+
+topic_prompt = """
+Identify the main topic of the given sentence. Provide a short (1-3 words) topic label.
+Respond with a JSON object containing the key 'topic' and its value.
+""" + +# Generate labels using Augini +result_df = augini.augment_columns(df, + ['semantic_label', 'sentiment', 'topic'], + custom_prompt=f"Sentence: {{sentence}}\n\n{semantic_label_prompt}\n\n{sentiment_prompt}\n\n{topic_prompt}" +) + +# Display the results +print(result_df) + +# You can also save the results to a CSV file +result_df.to_csv('labeled_sentences.csv', index=False) +``` diff --git a/augini/__init__.py b/augini/__init__.py index ccfeeda..78f4952 100644 --- a/augini/__init__.py +++ b/augini/__init__.py @@ -1,4 +1,4 @@ from .core import Augini +from .exceptions import APIError, DataProcessingError -__version__ = "0.1.0" -__all__ = ["Augini"] \ No newline at end of file +__all__ = ['Augini', 'APIError', 'DataProcessingError'] \ No newline at end of file diff --git a/augini/core.py b/augini/core.py index d980e4c..97a58d7 100644 --- a/augini/core.py +++ b/augini/core.py @@ -9,6 +9,7 @@ from pydantic import BaseModel, ValidationError, root_validator import re from .utils import extract_json, generate_default_prompt +from .exceptions import APIError, DataProcessingError nest_asyncio.apply() @@ -43,10 +44,14 @@ def __init__( base_url: str = "https://openrouter.ai/api/v1", debug: bool = False ): - self.client = AsyncOpenAI( - base_url=base_url if use_openrouter else None, - api_key=api_key - ) + if use_openrouter: + self.client = AsyncOpenAI( + base_url=base_url, + api_key=api_key, + ) + else: + self.client = AsyncOpenAI(api_key=api_key) + self.model_name = model self.temperature = temperature self.max_tokens = max_tokens @@ -157,27 +162,19 @@ def augment_columns(self, df: pd.DataFrame, columns: List[str], custom_prompt: O else: return asyncio.run(self._generate_features(result_df, column_names, prompt_template)) - def augment_columns(self, df: pd.DataFrame, columns: List[str], custom_prompt: Optional[str] = None, use_sync: bool = False) -> pd.DataFrame: + def augment_single(self, df: pd.DataFrame, column_name: str, custom_prompt: Optional[str] = None, use_sync: bool = False) -> pd.DataFrame: result_df = df.copy() available_columns = list(result_df.columns) - column_names = columns if custom_prompt: try: - CustomPromptModel(column_names=column_names, prompt=custom_prompt, available_columns=available_columns) + CustomPromptModel(column_names=[column_name], prompt=custom_prompt, available_columns=available_columns) except ValidationError as e: raise ValueError(f"Custom prompt validation error: {e}") - - prompt_template = custom_prompt or generate_default_prompt(column_names, available_columns) + prompt_template = custom_prompt or generate_default_prompt([column_name], available_columns) if use_sync: - return self._generate_features_sync(result_df, column_names, prompt_template) + return self._generate_features_sync(result_df, [column_name], prompt_template) else: - return asyncio.run(self._generate_features(result_df, column_names, prompt_template)) - -class APIError(Exception): - pass - -class DataProcessingError(Exception): - pass \ No newline at end of file + return asyncio.run(self._generate_features(result_df, [column_name], prompt_template)) \ No newline at end of file diff --git a/directory_contents.md b/directory_contents.md deleted file mode 100644 index 20f008c..0000000 --- a/directory_contents.md +++ /dev/null @@ -1,474 +0,0 @@ -## README.md - -``` -# Augini - -Augini is a Python framework for generating synthetic tabular data using AI. It leverages the power of language models to create realistic, fictional data based on existing datasets. 
- -## Installation - -You can install Augini using pip: -```sh -pip install augini -``` - -## Quick Start - -Here's a simple example of how to use Augini: - -```python -from augini import Augini -import pandas as pd - -# Initialize Augini -augini = Augini(api_key="your_api_key", use_openrouter=True) - -# Create a sample DataFrame -data = { - 'Place of Birth': ['New York', 'London', 'Tokyo'], - 'Age': [30, 25, 40], - 'Gender': ['Male', 'Female', 'Male'] -} -df = pd.DataFrame(data) - -# Add synthetic features -result_df = augini.augment_columns(df, 'NAME', 'OCCUPATION', 'FAVORITE_DRINK') - -print(result_df) -``` - -## Features - -- Generate synthetic data based on existing datasets -- Customizable prompts for data generation -- Support for both OpenAI API and OpenRouter -- Asynchronous processing for improved performance - -## Extending and Enriching Data - -Augini can be used to extend, augment, and enrich your datasets by adding synthetic features and bringing knowledge from language models to your data. - -### Adding Multiple Features - -You can add multiple features to your DataFrame: - -```python -result_df = augini.augment_columns(df, 'Hobby', 'FavoriteColor', 'FavoriteMovie') -print(result_df) -``` - -### Custom Prompts for Feature Generation - -Custom prompts allow you to generate specific features based on your needs: - -```python -custom_prompt = "Based on the person's name and age, suggest a quirky pet for them. Respond with a JSON object with the key 'QuirkyPet'." -result_df = augini.augment_single(df, 'QuirkyPet', custom_prompt=custom_prompt) -print(result_df) -``` - -### Anonymizing Data - -You can anonymize sensitive information in your dataset by generating synthetic data: - -```python -anonymize_prompt = "Create an anonymous profile for the person based on their age and city. Respond with a JSON object with keys 'AnonymousName' and 'AnonymousEmail'." -result_df = augini.augment_single(df, 'AnonymousProfile', custom_prompt=anonymize_prompt) -print(result_df) -``` - -## Bringing Knowledge from LLMs - -Leverage the knowledge embedded in language models to enhance your datasets: - -### Generating Detailed Descriptions - -```python -description_prompt = "Generate a detailed description for a person based on their age and city. Respond with a JSON object with the key 'Description'." -result_df = augini.augment_single(df, 'Description', custom_prompt=description_prompt) -print(result_df) -``` - -### Suggesting Recommendations - -```python -recommendation_prompt = "Suggest a book and a movie for a person based on their age and city. Respond with a JSON object with keys 'RecommendedBook' and 'RecommendedMovie'." -result_df = augini.augment_single(df, 'Recommendations', custom_prompt=recommendation_prompt) -print(result_df) -``` - -## Full Example - -Here's a full example demonstrating multiple features and custom prompts: - -```python -from augini import Augini -import pandas as pd - -# Initialize Augini -augini = Augini(api_key="your_api_key", use_openrouter=True) - -# Create a sample DataFrame -data = { - 'Name': ['Alice Johnson', 'Bob Smith', 'Charlie Davis'], - 'Age': [28, 34, 45], - 'City': ['New York', 'Los Angeles', 'Chicago'] -} -df = pd.DataFrame(data) - -# Add multiple synthetic features -result_df = augini.augment_columns(df, 'Occupation', 'Hobby', 'FavoriteColor') - -# Add a custom feature -custom_prompt = "Based on the person's name and age, suggest a quirky pet for them. Respond with a JSON object with the key 'QuirkyPet'." 
-result_df = augini.augment_single(result_df, 'QuirkyPet', custom_prompt=custom_prompt) - -# Anonymize data -anonymize_prompt = "Create an anonymous profile for the person based on their age and city. Respond with a JSON object with keys 'AnonymousName' and 'AnonymousEmail'." -result_df = augini.augment_single(result_df, 'AnonymousProfile', custom_prompt=anonymize_prompt) - -print(result_df) -``` - -## Contributing - -We welcome contributions to enhance Augini! Feel free to open issues and submit pull requests on our GitHub repository. - -``` - -## files.py - -``` -import os -import argparse - -def process_directory(directory_path, output_file, extensions): - with open(output_file, 'w', encoding='utf-8') as outfile: - for root, dirs, files in os.walk(directory_path): - for file in files: - # Check if file has one of the specified extensions - if extensions and not any(file.endswith(ext) for ext in extensions): - continue - - file_path = os.path.join(root, file) - relative_path = os.path.relpath(file_path, directory_path) - - # Write the file path as a Markdown header - outfile.write(f"## {relative_path}\n\n") - - # Write the file content - try: - with open(file_path, 'r', encoding='utf-8') as infile: - content = infile.read() - outfile.write("```\n") - outfile.write(content) - outfile.write("\n```\n\n") - except UnicodeDecodeError: - outfile.write(f"Error reading file: {file_path} (Unicode Decode Error)\n\n") - except Exception as e: - outfile.write(f"Error reading file: {file_path} ({str(e)})\n\n") - -if __name__ == "__main__": - # Setup argument parser - parser = argparse.ArgumentParser(description='Process a directory and output its contents to a Markdown file.') - parser.add_argument('-d', '--directory', type=str, help='Directory to process', default=os.getcwd()) - parser.add_argument('-o', '--output', type=str, help='Output Markdown file', default='directory_contents.md') - parser.add_argument('-e', '--extensions', type=str, nargs='*', help='File extensions to include (e.g., py js)') - - # Parse arguments - args = parser.parse_args() - directory_to_process = args.directory - output_markdown_file = args.output - file_extensions = args.extensions - - # Debugging: Print starting process - print(f"Processing directory: {directory_to_process}") - print(f"File extensions: {file_extensions}") - - if not os.path.isdir(directory_to_process): - print(f"Error: The directory '{directory_to_process}' does not exist.") - else: - process_directory(directory_to_process, output_markdown_file, file_extensions) - print(f"Markdown file '{output_markdown_file}' has been created with the contents of '{directory_to_process}'.") - -``` - -## directory_contents.md - -``` - -``` - -## examples/test_augini.py - -``` -import pandas as pd -from augini import Augini -from augini.exceptions import APIError, DataProcessingError - -def test_augini(): - # Initialize Augini - api_key = "your_api_key" - augini = Augini(api_key=api_key, use_openrouter=True, model='meta-llama/llama-3-8b-instruct', debug=False) - - # Create a sample DataFrame - data = { - 'Name': ['John Doe', 'Jane Smith', 'Bob Johnson'], - 'Age': [30, 25, 45], - 'City': ['New York', 'Los Angeles', 'Chicago'] - } - df = pd.DataFrame(data) - - # Test 1: Add a single feature - try: - result_df = augini.augment_single(df, 'Occupation') - except (APIError, DataProcessingError) as e: - print(f"Test 1 failed: {str(e)}") - - # Test 2: Add multiple features - try: - result_df = augini.augment_columns(df, 'Hobby', 'FavoriteColor') - except (APIError, 
DataProcessingError) as e: - print(f"Test 2 failed: {str(e)}") - - # Test 3: Add a feature with a custom prompt - try: - custom_prompt = "Based on the person's name and age, suggest a quirky pet for them. Respond with a JSON object with the key 'QuirkyPet'." - result_df = augini.augment_single(df, 'QuirkyPet', custom_prompt=custom_prompt) - except (APIError, DataProcessingError) as e: - print(f"Test 3 failed: {str(e)}") - - # Test 4: Test error handling with an invalid API key - try: - invalid_augini = Augini(api_key="invalid_key", use_openrouter=True) - invalid_augini.augment_single(df, 'InvalidFeature') - except APIError: - print("Test 4 passed: APIError caught as expected") - -if __name__ == "__main__": - test_augini() - -``` - -## augini/__init__.py - -``` -from .core import Augini - -__version__ = "0.1.0" -__all__ = ["Augini"] -``` - -## augini/core.py - -``` -import asyncio -from openai import AsyncOpenAI -import pandas as pd -import numpy as np -from tqdm.auto import tqdm -import nest_asyncio -import logging -from typing import List, Dict, Any, Optional, Tuple -from pydantic import BaseModel, ValidationError, root_validator -import re -from .utils import extract_json, generate_default_prompt - -nest_asyncio.apply() - -logging.basicConfig(level=logging.CRITICAL) -logger = logging.getLogger(__name__) - -class CustomPromptModel(BaseModel): - column_names: List[str] - prompt: str - available_columns: List[str] - - @root_validator(pre=True) - def check_prompt(cls, values): - prompt = values.get('prompt') - available_columns = values.get('available_columns') - placeholders = re.findall(r'{(.*?)}', prompt) - for ph in placeholders: - if ph not in available_columns: - raise ValueError(f"Feature '{ph}' used in custom prompt does not exist in available columns: {available_columns}") - return values - - -class Augini: - def __init__( - self, - api_key: str, - use_openrouter: bool = True, - model: str = "gpt-3.5-turbo", - temperature: float = 0.8, - max_tokens: int = 500, - concurrency_limit: int = 10, - base_url: str = "https://openrouter.ai/api/v1", - debug: bool = False - ): - self.client = AsyncOpenAI( - base_url=base_url if use_openrouter else None, - api_key=api_key - ) - self.model_name = model - self.temperature = temperature - self.max_tokens = max_tokens - self.semaphore = asyncio.Semaphore(concurrency_limit) - self.debug = debug - - if debug: - logger.setLevel(logging.INFO) - logging.getLogger("openai").setLevel(logging.INFO) - logging.getLogger("httpx").setLevel(logging.INFO) - - async def _get_response(self, prompt: str, row_data: Dict[str, Any], feature_names: List[str]) -> str: - async with self.semaphore: - try: - if self.debug: - logger.debug(f"Prompt: {prompt}") - - json_template = "{" + ", ".join(f'"{name}": "FILL"' for name in feature_names) + "}" - system_content = ( - "You are a helpful and very creative assistant that generates hyperrealistic (but fictional) synthetic tabular data based on limited information. " - "Ensure the response is a valid JSON object as it is very important." 
- ) - user_content = ( - f"{prompt}\n\n" - f"Here is the row data: {row_data}\n\n" - f"Please fill the following JSON template with appropriate values:\n{json_template}" - ) - - if self.debug: - print(f"System content: {user_content}") - logger.debug(f"User content: {user_content}") - - - response = await self.client.chat.completions.create( - model=self.model_name, - messages=[ - {"role": "system", "content": system_content}, - {"role": "user", "content": user_content} - ], - temperature=self.temperature, - max_tokens=self.max_tokens, - response_format={"type": "json_object"} - ) - - response_content = response.choices[0].message.content.strip() - if self.debug: - logger.debug(f"Response: {response_content}") - return response_content - except Exception as e: - logger.error(f"Error: {e}") - raise APIError(f"API request failed: {str(e)}") - - async def _generate_features(self, df: pd.DataFrame, feature_names: List[str], prompt_template: str) -> pd.DataFrame: - async def process_row(index: int, row: pd.Series) -> Tuple[int, Dict[str, Any]]: - try: - row_data = row.to_dict() - prompt = prompt_template.format(**row_data) - logger.debug(f"Processing row {index}: {row_data}") - response = await self._get_response(prompt, row_data, feature_names) - logger.debug(f"Response for row {index}: {response}") - feature_values = extract_json(response) - logger.debug(f"Extracted features for row {index}: {feature_values}") - if not feature_values or not all(feature in feature_values for feature in feature_names): - raise DataProcessingError(f"Expected features are missing in the JSON response: {feature_values}") - return index, feature_values - except Exception as e: - logger.warning(f"Error processing row {index}: {e}") - return index, {feature: np.nan for feature in feature_names} - - tasks = [process_row(index, row) for index, row in df.iterrows()] - results = await asyncio.gather(*tasks) - - # Sort results by index - sorted_results = sorted(results, key=lambda x: x[0]) - - for feature in feature_names: - df[feature] = [result[1].get(feature, np.nan) for result in sorted_results] - return df - - def _generate_features_sync(self, df: pd.DataFrame, feature_names: List[str], prompt_template: str) -> pd.DataFrame: - results = [] - for index, row in tqdm(df.iterrows(), total=len(df), desc="Generating features"): - row_data = row.to_dict() - prompt = prompt_template.format(**row_data) - response = asyncio.run(self._get_response(prompt, row_data, feature_names)) - feature_values = extract_json(response) - results.append(feature_values) - - for feature in feature_names: - df[feature] = [result.get(feature, np.nan) for result in results] - return df - - def augment_columns(self, df: pd.DataFrame, columns: List[str], custom_prompt: Optional[str] = None, use_sync: bool = False) -> pd.DataFrame: - result_df = df.copy() - available_columns = list(result_df.columns) - column_names = columns - - if custom_prompt: - try: - CustomPromptModel(column_names=column_names, prompt=custom_prompt, available_columns=available_columns) - except ValidationError as e: - raise ValueError(f"Custom prompt validation error: {e}") - - prompt_template = custom_prompt or generate_default_prompt(column_names, available_columns) - - if use_sync: - return self._generate_features_sync(result_df, column_names, prompt_template) - else: - return asyncio.run(self._generate_features(result_df, column_names, prompt_template)) - - def augment_columns(self, df: pd.DataFrame, columns: List[str], custom_prompt: Optional[str] = None, use_sync: bool = 
False) -> pd.DataFrame:
-        result_df = df.copy()
-        available_columns = list(result_df.columns)
-        column_names = columns
-
-        if custom_prompt:
-            try:
-                CustomPromptModel(column_names=column_names, prompt=custom_prompt, available_columns=available_columns)
-            except ValidationError as e:
-                raise ValueError(f"Custom prompt validation error: {e}")
-
-
-        prompt_template = custom_prompt or generate_default_prompt(column_names, available_columns)
-
-        if use_sync:
-            return self._generate_features_sync(result_df, column_names, prompt_template)
-        else:
-            return asyncio.run(self._generate_features(result_df, column_names, prompt_template))
-
-class APIError(Exception):
-    pass
-
-class DataProcessingError(Exception):
-    pass
-```
-
-## augini/utils.py
-
-```
-import json
-import re
-
-def extract_json(response):
-    try:
-        json_str = re.search(r'\{.*\}', response, re.DOTALL).group()
-        return json.loads(json_str)
-    except (json.JSONDecodeError, AttributeError):
-        return None
-
-def generate_default_prompt(feature_names, available_columns):
-    column_list = ", ".join(f"{col}: {{{col}}}" for col in available_columns)
-    features = ", ".join(f'"{feature}": "<{feature}>"' for feature in feature_names)
-    return (f"Given the following data:\n{column_list}\n"
-            f"Please provide the following features in a JSON object:\n{features}\n"
-            "If a feature is not applicable or cannot be determined, use null in the JSON.\n"
-            "Ensure the response is a valid JSON object as it is very important.")
-
-
-
-```
-
diff --git a/examples/test_augini.py b/examples/test_augini.py
index 6ec1483..9dc5046 100644
--- a/examples/test_augini.py
+++ b/examples/test_augini.py
@@ -1,45 +1,45 @@
+import unittest
 import pandas as pd
 from augini import Augini
 from augini.exceptions import APIError, DataProcessingError
 
-def test_augini():
-    # Initialize Augini
-    api_key = "your_api_key"
-    augini = Augini(api_key=api_key, use_openrouter=True, model='meta-llama/llama-3-8b-instruct', debug=False)
+class TestAugini(unittest.TestCase):
+    def setUp(self):
+        self.api_key = "your_api_key"  # Replace with a valid API key for testing
+        self.augini = Augini(api_key=self.api_key, use_openrouter=True, model='meta-llama/llama-3-8b-instruct', debug=False)
+        self.df = pd.DataFrame({
+            'Name': ['John Doe', 'Jane Smith', 'Bob Johnson'],
+            'Age': [30, 25, 45],
+            'City': ['New York', 'Los Angeles', 'Chicago']
+        })
 
-    # Create a sample DataFrame
-    data = {
-        'Name': ['John Doe', 'Jane Smith', 'Bob Johnson'],
-        'Age': [30, 25, 45],
-        'City': ['New York', 'Los Angeles', 'Chicago']
-    }
-    df = pd.DataFrame(data)
+    def test_augment_single(self):
+        try:
+            result_df = self.augini.augment_single(self.df, 'Occupation')
+            self.assertIn('Occupation', result_df.columns)
+        except (APIError, DataProcessingError) as e:
+            self.fail(f"augment_single raised {type(e).__name__} unexpectedly: {str(e)}")
 
-    # Test 1: Add a single feature
-    try:
-        result_df = augini.augment_single(df, 'Occupation')
-    except (APIError, DataProcessingError) as e:
-        print(f"Test 1 failed: {str(e)}")
+    def test_augment_multiple(self):
+        try:
+            result_df = self.augini.augment_columns(self.df, ['Hobby', 'FavoriteColor'])
+            self.assertIn('Hobby', result_df.columns)
+            self.assertIn('FavoriteColor', result_df.columns)
+        except (APIError, DataProcessingError) as e:
+            self.fail(f"augment_columns raised {type(e).__name__} unexpectedly: {str(e)}")
 
-    # Test 2: Add multiple features
-    try:
-        result_df = augini.augment_columns(df, 'Hobby', 'FavoriteColor')
-    except (APIError, DataProcessingError) as e:
-        print(f"Test 2
failed: {str(e)}") - - # Test 3: Add a feature with a custom prompt - try: + def test_custom_prompt(self): custom_prompt = "Based on the person's name and age, suggest a quirky pet for them. Respond with a JSON object with the key 'QuirkyPet'." - result_df = augini.augment_single(df, 'QuirkyPet', custom_prompt=custom_prompt) - except (APIError, DataProcessingError) as e: - print(f"Test 3 failed: {str(e)}") + try: + result_df = self.augini.augment_columns(self.df, ['QuirkyPet'], custom_prompt=custom_prompt) + self.assertIn('QuirkyPet', result_df.columns) + except (APIError, DataProcessingError) as e: + self.fail(f"augment_columns with custom prompt raised {type(e).__name__} unexpectedly: {str(e)}") - # Test 4: Test error handling with an invalid API key - try: + def test_invalid_api_key(self): invalid_augini = Augini(api_key="invalid_key", use_openrouter=True) - invalid_augini.augment_single(df, 'InvalidFeature') - except APIError: - print("Test 4 passed: APIError caught as expected") + with self.assertRaises(APIError): + invalid_augini.augment_columns(self.df, ['InvalidFeature']) -if __name__ == "__main__": - test_augini() +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 7db6817..386459e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,9 +1,9 @@ [project] name = "augini" -version = "0.1.0" +version = "0.2.0" authors = [ - { name = "Vadim", email = "vadim@tabularis.ai" }, + { name = "Vadim Borisov", email = "vadim@tabularis.ai" }, ] description = "A framework for generating synthetic tabular data using AI" readme = "README.md" @@ -21,7 +21,7 @@ classifiers = [ ] keywords = ["augini", "synthetic data", "tabular data", "AI", "data generation", "OpenAI", "OpenRouter"] dependencies = [ - "openai", + "openai>=1.35.13", "pandas", "numpy<2.0.0", "tqdm", diff --git a/requirements.txt b/requirements.txt index 5a3daa0..8497eef 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -openai +openai>=1.35.13 pandas tqdm numpy<2.0.0
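
As a quick sanity check of the 0.2.0 API settled above (`augment_single` for one column, `augment_columns` for a list of columns, and the exceptions importable from `augini`), here is a minimal usage sketch; the API key and model are placeholders:

```python
import pandas as pd
from augini import Augini, APIError, DataProcessingError

augini = Augini(api_key="your-api-key", use_openrouter=True, model='meta-llama/llama-3-8b-instruct')

df = pd.DataFrame({'Age': [28, 34], 'City': ['New York', 'Chicago']})

try:
    df = augini.augment_single(df, 'Occupation')                 # add one generated column
    df = augini.augment_columns(df, ['Hobby', 'FavoriteColor'])  # add several in one call
except (APIError, DataProcessingError) as e:
    print(f"Augmentation failed: {e}")

print(df)
```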