example.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

from quantize import int4, any4, int8
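
# Pick the compute device and load the baseline bfloat16 model and tokenizer.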
device = "cuda" if torch.cuda.is_available() else "cpu"
model_name = "meta-llama/Llama-3.2-1B"
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16).to(device)
tokenizer = AutoTokenizer.from_pretrained(model_name)
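# TextStreamer prints tokens to stdout as they are generated, so each run's output is visible live.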
streamer = TextStreamer(tokenizer)
model.eval()
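
# Tokenize a simple prompt and move it to the model's device.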
prompt = "Once upon a time"
inputs = tokenizer(prompt, return_tensors="pt").to(device)
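
# Run a short warmup generation so one-time startup costs do not affect the comparison.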
print("\nWarmup...")
_ = model.generate(**inputs, max_new_tokens=4)
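
# Generate from the original bfloat16 model as a reference.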
print("\nBaseline:")
outputs = model.generate(**inputs, streamer=streamer, do_sample=True, max_new_tokens=256)
text = tokenizer.batch_decode(outputs)[0]
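
# Quantize the model weights to 4 bits with any4, then generate again with the
# same prompt so the output can be compared against the baseline.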
print("\nQuantize:")
model = any4(model)
outputs = model.generate(**inputs, streamer=streamer, do_sample=True, max_new_tokens=256)
text = tokenizer.batch_decode(outputs)[0]
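
# The quantize module also exposes int4 and int8 quantizers (imported above).
# Assuming they take a model and return the quantized model the same way any4
# does (an assumption based on the import, not verified here), swapping the
# quantization method is a one-line change, e.g.:
#
#   model = int4(model)   # 4-bit integer quantization
#   model = int8(model)   # 8-bit integer quantization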