
Commit cdcf511

Merge pull request #44 from Supahands/chore-add_supa_ollama_models
Chore add supa ollama models
2 parents: 47f17d2 + 7a83b1d

File tree

2 files changed: 11 additions & 7 deletions


hugging_face_to_guff.py

Lines changed: 5 additions & 5 deletions
@@ -85,7 +85,7 @@ def download_model(
     private: bool = False,
     ollama_upload: bool = False,
     hf_upload: bool = False,
-    clean_run: bool = False,  # New parameter
+    clean_run: bool = False,
 ):
     logger.info(f"Downloading model {model_id}...")
     import subprocess
@@ -159,8 +159,8 @@ def convert_to_gguf(
     branch: str = "",
     filter_path: str = "",
     ollama_upload: bool = False,
-    hf_upload: bool = False,  # New parameter
-    clean_run: bool = False,  # New parameter
+    hf_upload: bool = False,
+    clean_run: bool = False,
 ):
     """Convert model to GGUF format with multiple quantization types and push to Ollama"""
     logger.info(f"Converting model with quantization types: {quanttypes}")
@@ -246,7 +246,7 @@ def push_to_ollama(
     modelname: str,
     source_model_id: str,
     username: str,
-    clean_run: bool = False,  # New parameter
+    clean_run: bool = False,
 ):
     """Push converted models to Ollama using tags for different quantizations"""
     logger.info("Pushing models to Ollama...")
@@ -375,7 +375,7 @@ def main(
     private: bool = False,
     ollama_upload: bool = False,
     hf_upload: bool = False,
-    clean_run: bool = False,  # New parameter
+    clean_run: bool = False,
 ):
     logger.info(f"Starting conversion process for {modelowner}/{modelname}")
     converter = ModelConverter()
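
The hunks above only drop the stale "# New parameter" comments; the clean_run flag itself is threaded through download_model, convert_to_gguf, push_to_ollama, and main, and its implementation is not shown in this diff. A minimal sketch of what such a flag typically guards, assuming hypothetical model_dir/gguf_dir working directories (prepare_workspace is illustrative, not code from the repo):

import shutil
from pathlib import Path

def prepare_workspace(model_dir: str, gguf_dir: str, clean_run: bool = False) -> None:
    """Hypothetical helper: start from empty directories when clean_run is set."""
    for path in (Path(model_dir), Path(gguf_dir)):
        if clean_run and path.exists():
            shutil.rmtree(path)  # drop artifacts left over from a previous run
        path.mkdir(parents=True, exist_ok=True)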

ollama_service.py

Lines changed: 6 additions & 2 deletions
@@ -15,14 +15,18 @@
 # Default server port.
 MODEL_IDS: list[str] = [
     "llama3",
+    "llama3.1",
     "llama3.2",
     "mistral",
     "gemma2",
     "qwen2.5",
+    "yi",
     "aisingapore/gemma2-9b-cpt-sea-lionv3-instruct",
     "hf.co/Supa-AI/malaysian-Llama-3.2-3B-Instruct-Q8_0-GGUF",
-    "hf.co/Supa-AI/gemma2-9b-cpt-sahabatai-v1-instruct-q8_0-gguf",
-    "hf.co/Supa-AI/llama3-8b-cpt-sahabatai-v1-instruct-q8_0-gguf"
+    "Supa-AI/gemma2-9b-cpt-sahabatai-v1-instruct:q8_0",
+    "Supa-AI/llama3-8b-cpt-sahabatai-v1-instruct:q8_0",
+    "Supa-AI/gemma2-9b-cpt-sahabatai-v1-base:q8_0",
+    "Supa-AI/ministral-8b-instruct-2410:q8_0"
 ]
 
 OLLAMA_PORT: int = 11434
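
How ollama_service.py consumes MODEL_IDS is not shown in this diff, but each entry (library names such as "llama3.1" as well as namespaced tags such as "Supa-AI/ministral-8b-instruct-2410:q8_0") can be fetched with the standard ollama pull CLI. A minimal sketch, with pull_models as an illustrative helper rather than code from the repo:

import subprocess

def pull_models(model_ids: list[str]) -> None:
    """Illustrative helper: make sure every listed model is available locally."""
    for model_id in model_ids:
        subprocess.run(["ollama", "pull", model_id], check=True)

# Example usage: pull_models(MODEL_IDS)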
