@@ -58,7 +58,7 @@ client = Together()
 
 # Simple text message
 response = client.chat.completions.create(
-    model="mistralai/Mixtral-8x7B-Instruct-v0.1",
+    model="meta-llama/Llama-4-Scout-17B-16E-Instruct",
     messages=[{"role": "user", "content": "tell me about new york"}],
 )
 print(response.choices[0].message.content)
@@ -148,7 +148,7 @@ from together import Together
 
 client = Together()
 stream = client.chat.completions.create(
-    model="mistralai/Mixtral-8x7B-Instruct-v0.1",
+    model="meta-llama/Llama-4-Scout-17B-16E-Instruct",
     messages=[{"role": "user", "content": "tell me about new york"}],
     stream=True,
 )
@@ -173,7 +173,7 @@ async def async_chat_completion(messages):
     async_client = AsyncTogether()
     tasks = [
         async_client.chat.completions.create(
-            model="mistralai/Mixtral-8x7B-Instruct-v0.1",
+            model="meta-llama/Llama-4-Scout-17B-16E-Instruct",
             messages=[{"role": "user", "content": message}],
         )
         for message in messages
@@ -196,7 +196,7 @@ from together import Together
 client = Together()
 
 response = client.chat.completions.create(
-    model="mistralai/Mixtral-8x7B-Instruct-v0.1",
+    model="meta-llama/Llama-4-Scout-17B-16E-Instruct",
     messages=[{"role": "user", "content": "tell me about new york"}],
     logprobs=1
 )
@@ -356,7 +356,7 @@ client = Together()
 
 client.fine_tuning.create(
     training_file='file-d0d318cb-b7d9-493a-bd70-1cfe089d3815',
-    model='mistralai/Mixtral-8x7B-Instruct-v0.1',
+    model='meta-llama/Llama-4-Scout-17B-16E-Instruct',
     n_epochs=3,
     n_checkpoints=1,
     batch_size="max",
@@ -394,7 +394,7 @@ for model in models:
 together chat.completions \
   --message "system" "You are a helpful assistant named Together" \
   --message "user" "What is your name?" \
-  --model mistralai/Mixtral-8x7B-Instruct-v0.1
+  --model meta-llama/Llama-4-Scout-17B-16E-Instruct
 ```
 
 The Chat Completions CLI enables streaming tokens to stdout by default. To disable streaming, use `--no-stream`.
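For example, a non-streaming chat call combines the flags shown above (a minimal sketch; the flag names are taken from this README, only the combination is illustrative):

```bash
# Same chat request as above, but without streaming tokens to stdout
together chat.completions \
  --message "user" "What is your name?" \
  --model meta-llama/Llama-4-Scout-17B-16E-Instruct \
  --no-stream
```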
@@ -404,7 +404,7 @@ The Chat Completions CLI enables streaming tokens to stdout by default. To disab
 ```bash
 together completions \
   "Large language models are " \
-  --model mistralai/Mixtral-8x7B-v0.1 \
+  --model meta-llama/Llama-4-Scout-17B-16E-Instruct \
   --max-tokens 512 \
   --stop "."
 ```