
Commit 4915f55 (parent: 8c8cfee)

Added original models.

File tree: 4 files changed (+73, -39 lines)
  doc_generator/main.py
  doc_generator/query/query.py
  doc_generator/types.py
  doc_generator/utils/llm_utils.py

doc_generator/main.py
Lines changed: 11 additions & 11 deletions

@@ -45,26 +45,26 @@ def main():  # pragma: no cover
                    completer=mode_completer)
 
     model_completer = WordCompleter(
-        [LLMModels.LLAMA2_7B_CHAT_GPTQ.value,
-         LLMModels.LLAMA2_13B_CHAT_GPTQ.value,
-         LLMModels.CODELLAMA_7B_INSTRUCT_GPTQ.value,
-         LLMModels.CODELLAMA_13B_INSTRUCT_GPTQ.value,
+        [LLMModels.LLAMA2_7B_CHAT_HF.value,
+         LLMModels.LLAMA2_13B_CHAT_HF.value,
+         LLMModels.CODELLAMA_7B_INSTRUCT_HF.value,
+         LLMModels.CODELLAMA_13B_INSTRUCT_HF.value,
          LLMModels.GOOGLE_GEMMA_2B_INSTRUCT.value,
          LLMModels.GOOGLE_GEMMA_7B_INSTRUCT.value])
     model_name = prompt("Which model?\n", completer=model_completer)
     match model_name:
-        case LLMModels.LLAMA2_13B_CHAT_GPTQ.value:
-            model = LLMModels.LLAMA2_13B_CHAT_GPTQ
-        case LLMModels.CODELLAMA_7B_INSTRUCT_GPTQ.value:
-            model = LLMModels.CODELLAMA_7B_INSTRUCT_GPTQ
-        case LLMModels.CODELLAMA_13B_INSTRUCT_GPTQ.value:
-            model = LLMModels.CODELLAMA_13B_INSTRUCT_GPTQ
+        case LLMModels.LLAMA2_13B_CHAT_HF.value:
+            model = LLMModels.LLAMA2_13B_CHAT_HF
+        case LLMModels.CODELLAMA_7B_INSTRUCT_HF.value:
+            model = LLMModels.CODELLAMA_7B_INSTRUCT_HF
+        case LLMModels.CODELLAMA_13B_INSTRUCT_HF.value:
+            model = LLMModels.CODELLAMA_13B_INSTRUCT_HF
         case LLMModels.GOOGLE_GEMMA_2B_INSTRUCT.value:
             model = LLMModels.GOOGLE_GEMMA_2B_INSTRUCT
         case LLMModels.GOOGLE_GEMMA_7B_INSTRUCT.value:
            model = LLMModels.GOOGLE_GEMMA_7B_INSTRUCT
         case _:
-            model = LLMModels.LLAMA2_7B_CHAT_GPTQ
+            model = LLMModels.LLAMA2_7B_CHAT_HF
     print("Initialization Complete.\n")
 
     repo_config = {
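
For readers skimming the hunk above: WordCompleter and prompt come from the prompt_toolkit package, and the wildcard case makes LLAMA2_7B_CHAT_HF the default for unrecognized input. A minimal standalone sketch of the same selection pattern (illustrative only, not code from this repository; match/case requires Python >= 3.10):

from prompt_toolkit import prompt
from prompt_toolkit.completion import WordCompleter

# Offer tab-completion over the model ids, then dispatch on the answer.
choices = ["meta-llama/Llama-2-7b-chat-hf", "google/gemma-2b-it"]
answer = prompt("Which model?\n", completer=WordCompleter(choices))
match answer:
    case "google/gemma-2b-it":
        selected = "google/gemma-2b-it"
    case _:
        # As in the diff, fall back to the 7B Llama 2 chat model.
        selected = "meta-llama/Llama-2-7b-chat-hf"
print(f"Selected: {selected}")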

doc_generator/query/query.py
Lines changed: 1 addition & 1 deletion

@@ -77,7 +77,7 @@ def generate_readme(repo_config: AutodocRepoConfig,
     clear()
 
     print('Generating README...')
-    readme_path = os.path.join(data_path, "README.md")
+    readme_path = os.path.join(data_path, f"README.md")
     with open(readme_path, "w", encoding='utf-8') as file:
         file.write(f"# {repo_config.name}")
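
The only change in this hunk adds an f prefix to a literal with no placeholders, which is behaviorally inert; both expressions yield the same path:

import os

data_path = "output"  # hypothetical value for illustration
assert os.path.join(data_path, "README.md") == os.path.join(data_path, f"README.md")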

doc_generator/types.py
Lines changed: 8 additions & 4 deletions

@@ -11,10 +11,14 @@ class LLMModels(str, Enum):
     GPT3 = "gpt-3.5-turbo"
     GPT4 = "gpt-4"
     GPT432k = "gpt-4-32k"
-    LLAMA2_7B_CHAT_GPTQ = "TheBloke/Llama-2-7B-Chat-GPTQ"
-    LLAMA2_13B_CHAT_GPTQ = "TheBloke/Llama-2-13B-Chat-GPTQ"
-    CODELLAMA_7B_INSTRUCT_GPTQ = "TheBloke/CodeLlama-7B-Instruct-GPTQ"
-    CODELLAMA_13B_INSTRUCT_GPTQ = "TheBloke/CodeLlama-13B-Instruct-GPTQ"
+    # LLAMA2_7B_CHAT_GPTQ = "TheBloke/Llama-2-7B-Chat-GPTQ"
+    # LLAMA2_13B_CHAT_GPTQ = "TheBloke/Llama-2-13B-Chat-GPTQ"
+    # CODELLAMA_7B_INSTRUCT_GPTQ = "TheBloke/CodeLlama-7B-Instruct-GPTQ"
+    # CODELLAMA_13B_INSTRUCT_GPTQ = "TheBloke/CodeLlama-13B-Instruct-GPTQ"
+    LLAMA2_7B_CHAT_HF = "meta-llama/Llama-2-7b-chat-hf"
+    LLAMA2_13B_CHAT_HF = "meta-llama/Llama-2-13b-chat-hf"
+    CODELLAMA_7B_INSTRUCT_HF = "meta-llama/CodeLlama-7b-Instruct-hf"
+    CODELLAMA_13B_INSTRUCT_HF = "meta-llama/CodeLlama-13b-Instruct-hf"
     GOOGLE_GEMMA_2B_INSTRUCT = "google/gemma-2b-it"
     GOOGLE_GEMMA_7B_INSTRUCT = "google/gemma-7b-it"
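
Because LLMModels subclasses (str, Enum), each member's value is the Hugging Face repo id and can be passed straight to a loader. A sketch of typical downstream use (assumes the transformers library and, for the gated meta-llama repos, an accepted license plus huggingface-cli login):

from enum import Enum

from transformers import AutoTokenizer


class LLMModels(str, Enum):
    # Trimmed copy of the enum above, for illustration only.
    LLAMA2_7B_CHAT_HF = "meta-llama/Llama-2-7b-chat-hf"


repo_id = LLMModels.LLAMA2_7B_CHAT_HF.value  # plain str: "meta-llama/Llama-2-7b-chat-hf"
tokenizer = AutoTokenizer.from_pretrained(repo_id)  # downloads from the Hub on first use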

doc_generator/utils/llm_utils.py
Lines changed: 53 additions & 23 deletions

@@ -132,21 +132,21 @@ def get_openai_api_key():
         failed=0,
         total=0,
     ),
-    LLMModels.LLAMA2_7B_CHAT_GPTQ: LLMModelDetails(
-        name=LLMModels.LLAMA2_7B_CHAT_GPTQ,
-        input_cost_per_1k_tokens=0,
-        output_cost_per_1k_tokens=0,
-        max_length=4096,
-        llm=get_llama_chat_model(
-            LLMModels.LLAMA2_7B_CHAT_GPTQ.value,
-            model_kwargs={"temperature": 0}
-        ),
-        input_tokens=0,
-        output_tokens=0,
-        succeeded=0,
-        failed=0,
-        total=0,
-    ),
+    # LLMModels.LLAMA2_7B_CHAT_GPTQ: LLMModelDetails(
+    #     name=LLMModels.LLAMA2_7B_CHAT_GPTQ,
+    #     input_cost_per_1k_tokens=0,
+    #     output_cost_per_1k_tokens=0,
+    #     max_length=4096,
+    #     llm=get_llama_chat_model(
+    #         LLMModels.LLAMA2_7B_CHAT_GPTQ.value,
+    #         model_kwargs={"temperature": 0}
+    #     ),
+    #     input_tokens=0,
+    #     output_tokens=0,
+    #     succeeded=0,
+    #     failed=0,
+    #     total=0,
+    # ),
     # LLMModels.LLAMA2_13B_CHAT_GPTQ: LLMModelDetails(
     #     name=LLMModels.LLAMA2_13B_CHAT_GPTQ,
     #     input_cost_per_1k_tokens=0,

@@ -162,13 +162,43 @@ def get_openai_api_key():
     #     failed=0,
     #     total=0,
     # ),
-    LLMModels.GOOGLE_GEMMA_2B_INSTRUCT: LLMModelDetails(
-        name=LLMModels.GOOGLE_GEMMA_2B_INSTRUCT,
+    # LLMModels.GOOGLE_GEMMA_2B_INSTRUCT: LLMModelDetails(
+    #     name=LLMModels.GOOGLE_GEMMA_2B_INSTRUCT,
+    #     input_cost_per_1k_tokens=0,
+    #     output_cost_per_1k_tokens=0,
+    #     max_length=8192,
+    #     llm=get_gemma_chat_model(
+    #         LLMModels.GOOGLE_GEMMA_2B_INSTRUCT.value,
+    #         model_kwargs={"temperature": 0}
+    #     ),
+    #     input_tokens=0,
+    #     output_tokens=0,
+    #     succeeded=0,
+    #     failed=0,
+    #     total=0,
+    # ),
+    # LLMModels.GOOGLE_GEMMA_7B_INSTRUCT: LLMModelDetails(
+    #     name=LLMModels.GOOGLE_GEMMA_7B_INSTRUCT,
+    #     input_cost_per_1k_tokens=0,
+    #     output_cost_per_1k_tokens=0,
+    #     max_length=8192,
+    #     llm=get_gemma_chat_model(
+    #         LLMModels.GOOGLE_GEMMA_7B_INSTRUCT.value,
+    #         model_kwargs={"temperature": 0}
+    #     ),
+    #     input_tokens=0,
+    #     output_tokens=0,
+    #     succeeded=0,
+    #     failed=0,
+    #     total=0,
+    # ),
+    LLMModels.CODELLAMA_7B_INSTRUCT_HF: LLMModelDetails(
+        name=LLMModels.CODELLAMA_7B_INSTRUCT_HF,
         input_cost_per_1k_tokens=0,
         output_cost_per_1k_tokens=0,
         max_length=8192,
-        llm=get_gemma_chat_model(
-            LLMModels.GOOGLE_GEMMA_2B_INSTRUCT.value,
+        llm=get_llama_chat_model(
+            LLMModels.CODELLAMA_7B_INSTRUCT_HF.value,
             model_kwargs={"temperature": 0}
         ),
         input_tokens=0,

@@ -177,13 +207,13 @@ def get_openai_api_key():
         failed=0,
         total=0,
     ),
-    # LLMModels.GOOGLE_GEMMA_7B_INSTRUCT: LLMModelDetails(
-    #     name=LLMModels.GOOGLE_GEMMA_7B_INSTRUCT,
+    # LLMModels.CODELLAMA_13B_INSTRUCT_HF: LLMModelDetails(
+    #     name=LLMModels.CODELLAMA_13B_INSTRUCT_HF,
     #     input_cost_per_1k_tokens=0,
     #     output_cost_per_1k_tokens=0,
     #     max_length=8192,
-    #     llm=get_gemma_chat_model(
-    #         LLMModels.GOOGLE_GEMMA_7B_INSTRUCT.value,
+    #     llm=get_llama_chat_model(
+    #         LLMModels.CODELLAMA_13B_INSTRUCT_HF.value,
     #         model_kwargs={"temperature": 0}
     #     ),
     #     input_tokens=0,