diff --git a/.bumpversion.cfg b/.bumpversion.cfg
new file mode 100644
index 0000000..4eae53f
--- /dev/null
+++ b/.bumpversion.cfg
@@ -0,0 +1,6 @@
+[bumpversion]
+current_version = 1.0.1
+commit = True
+tag = True
+
+[bumpversion:file:setup.py]
diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index e3eebb3..adf9f53 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -3,11 +3,11 @@ name: Publish to PyPI
 on:
   push:
     branches:
-      - main # Adjust if your default branch is different
+      - main
 
 jobs:
   publish:
-    name: Publish Python Package
+    name: Bump Version and Publish
     runs-on: ubuntu-latest
 
     steps:
@@ -17,12 +17,21 @@ jobs:
       - name: Set up Python
         uses: actions/setup-python@v4
         with:
-          python-version: '3.12'
+          python-version: "3.9"
 
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          pip install build twine
+          pip install build twine bump2version
+
+      - name: Configure Git identity for bump2version commit and tag
+        run: |
+          git config user.name "github-actions[bot]"
+          git config user.email "github-actions[bot]@users.noreply.github.com"
+
+      - name: Bump version
+        run: |
+          bump2version patch
 
       - name: Build package
         run: |
@@ -34,3 +43,10 @@
         TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
       run: |
         python -m twine upload dist/*
+
+      - name: Push changes back to repository
+        uses: ad-m/github-push-action@master # NOTE(review): this action has no v1 tag; use master or pin a v0.x release
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          branch: main
+          tags: true # also push the tag created by bump2version
diff --git a/README.md b/README.md
index 46721b9..416f149 100644
--- a/README.md
+++ b/README.md
@@ -9,7 +9,22 @@
 - Easy-to-use interface for text generation
 
 ## Installation
-Ensure you have the required libraries installed:
+### Using `pip`
+You can install `AnyGen` from PyPI:
+```bash
+pip install -U anygen
+```
+
+### From Source
+Clone the repository and install it manually:
+```bash
+git clone https://github.com/macabdul9/AnyGen.git
+cd AnyGen
+pip install .
+```
+
+### Requirements
+Ensure the following libraries are installed:
 ```bash
 pip install transformers google-generativeai requests openai
 ```
@@ -81,7 +96,7 @@
 Both OpenAI and Gemini models require an API key stored in a JSON file. Below is `openai_keys.json`:
 ```json
 {
-  "gpt-4o-miini": {
+  "gpt-4o-mini": {
     "api_key": "your-openai-api-key",
     "endpoint": "your_endpoint"
   }
@@ -105,7 +120,7 @@ Both OpenAI and Gemini models require an API key stored in a JSON file. Below is
 - `beam_size`: The number of beams to use for beam search.
 
 ## Contributions
-Feel free to submit issues or contribute to this repository!
+Feel free to submit issues and/or contribute to this repository!
 
 ## License
 This project is licensed under the MIT License.
diff --git a/anygen/__init__.py b/anygen/__init__.py
index e69de29..40e1683 100644
--- a/anygen/__init__.py
+++ b/anygen/__init__.py
@@ -0,0 +1 @@
+from .anygen import AnyGen
\ No newline at end of file
diff --git a/tests/test_anygen.py b/tests/test_anygen.py
index c50296c..d7e7fea 100644
--- a/tests/test_anygen.py
+++ b/tests/test_anygen.py
@@ -2,14 +2,14 @@
 from anygen.anygen import AnyGen
 
 def test_anygen():
-    prompt = "Once upon a time, in a land far away"
+    prompt = "Write a Python function to calculate the factorial of a number."
 
     # Test Hugging Face Model
-    # print("Testing Hugging Face Model...")
-    # hf_model_name = "meta-llama/Llama-3.2-1B-Instruct" # Replace with your Hugging Face model
-    # hf_generator = AnyGen(model_type="huggingface", model_name_or_path=hf_model_name, device="cpu")
-    # hf_output = hf_generator.generate(prompt, parameters=None)
-    # print("Hugging Face Output:", hf_output)
+    print("Testing Hugging Face Model...")
+    hf_model_name = "meta-llama/Llama-3.2-1B-Instruct" # Replace with your Hugging Face model
+    hf_generator = AnyGen(model_type="huggingface", model_name_or_path=hf_model_name, device="cpu")
+    hf_output = hf_generator.generate(prompt, parameters=None)
+    print("Hugging Face Output:", hf_output)
 
     # Test OpenAI Model
     print("Testing OpenAI Model...")