Skip to content

Commit

Permalink
Merge pull request #45 from Supahands/develop
Browse files Browse the repository at this point in the history
Develop to main: new ollama models
  • Loading branch information
OriginalByteMe authored Nov 22, 2024
2 parents d0af335 + cdcf511 commit ed6a5d9
Show file tree
Hide file tree
Showing 2 changed files with 11 additions and 7 deletions.
10 changes: 5 additions & 5 deletions hugging_face_to_guff.py
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ def download_model(
private: bool = False,
ollama_upload: bool = False,
hf_upload: bool = False,
clean_run: bool = False, # New parameter
clean_run: bool = False,
):
logger.info(f"Downloading model {model_id}...")
import subprocess
Expand Down Expand Up @@ -159,8 +159,8 @@ def convert_to_gguf(
branch: str = "",
filter_path: str = "",
ollama_upload: bool = False,
hf_upload: bool = False, # New parameter
clean_run: bool = False, # New parameter
hf_upload: bool = False,
clean_run: bool = False,
):
"""Convert model to GGUF format with multiple quantization types and push to Ollama"""
logger.info(f"Converting model with quantization types: {quanttypes}")
Expand Down Expand Up @@ -336,7 +336,7 @@ def push_to_ollama(
modelname: str,
source_model_id: str,
username: str,
clean_run: bool = False, # New parameter
clean_run: bool = False,
):
"""Push converted models to Ollama using tags for different quantizations"""
logger.info("Pushing models to Ollama...")
Expand Down Expand Up @@ -465,7 +465,7 @@ def main(
private: bool = False,
ollama_upload: bool = False,
hf_upload: bool = False,
clean_run: bool = False, # New parameter
clean_run: bool = False,
):
logger.info(f"Starting conversion process for {modelowner}/{modelname}")
converter = ModelConverter()
Expand Down
8 changes: 6 additions & 2 deletions ollama_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,15 +15,19 @@
# Default server port.
# Model identifiers pre-registered with the Ollama service. Entries are
# plain Ollama library tags (e.g. "llama3.2:1b"), namespaced Ollama models
# ("Supa-AI/<model>:<quant>"), or Hugging Face GGUF references ("hf.co/...").
MODEL_IDS: list[str] = [
    "llama3",
    "llama3.1",
    "llama3.2",
    "llama3.2:1b",
    "mistral",
    "gemma2",
    "qwen2.5",
    "yi",
    "aisingapore/gemma2-9b-cpt-sea-lionv3-instruct",
    "hf.co/Supa-AI/malaysian-Llama-3.2-3B-Instruct-Q8_0-GGUF",
    "hf.co/Supa-AI/gemma2-9b-cpt-sahabatai-v1-instruct-q8_0-gguf",
    # NOTE: the comma below was previously missing, so Python's implicit
    # string-literal concatenation silently fused this entry with the next
    # one into a single invalid id, dropping both models from the list.
    "hf.co/Supa-AI/llama3-8b-cpt-sahabatai-v1-instruct-q8_0-gguf",
    "Supa-AI/gemma2-9b-cpt-sahabatai-v1-instruct:q8_0",
    "Supa-AI/llama3-8b-cpt-sahabatai-v1-instruct:q8_0",
    "Supa-AI/gemma2-9b-cpt-sahabatai-v1-base:q8_0",
    "Supa-AI/ministral-8b-instruct-2410:q8_0",
]

OLLAMA_PORT: int = 11434
Expand Down

0 comments on commit ed6a5d9

Please sign in to comment.