# example.py (forked from b0kch01/llama-cpu)
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the GNU General Public License version 3.
from typing import Tuple
import os
import sys
import torch
import fire
import time
import json
from pathlib import Path
from fairscale.nn.model_parallel.initialize import initialize_model_parallel
from llama import ModelArgs, Transformer, Tokenizer, LLaMA
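
# Dependencies implied by the imports above (a sketch; exact versions are assumptions):
#   pip install torch fire fairscale
# plus the local `llama` package provided by this repository.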


def setup_model_parallel() -> Tuple[int, int]:
    # Ranks come from the environment (torchrun exports these automatically).
    local_rank = int(os.environ.get("LOCAL_RANK", -1))
    world_size = int(os.environ.get("WORLD_SIZE", -1))

    # Distributed/CUDA initialization is disabled in this CPU-only fork.
    # torch.distributed.init_process_group("gloo")
    # initialize_model_parallel(world_size)
    # torch.cuda.set_device(local_rank)

    # seed must be the same in all processes
    torch.manual_seed(1)
    return local_rank, world_size


def load(ckpt_dir: str, tokenizer_path: str, local_rank: int, world_size: int) -> LLaMA:
    start_time = time.time()

    print("Locating checkpoints")
    checkpoints = sorted(Path(ckpt_dir).glob("*.pth"))
    assert (
        world_size == len(checkpoints)
    ), f"Loading a checkpoint for MP={len(checkpoints)} but world size is {world_size}"
    print(f"Found MP={len(checkpoints)} checkpoints")
    ckpt_path = checkpoints[local_rank]

    print("Creating checkpoint instance...")
    checkpoint = torch.load(ckpt_path, map_location="cpu")

    print("Grabbing params...")
    with open(Path(ckpt_dir) / "params.json", "r") as f:
        params = json.loads(f.read())

    print("Loading model arguments...")
    model_args: ModelArgs = ModelArgs(
        max_seq_len=1024, max_batch_size=32, **params)

    print("Creating tokenizer...")
    tokenizer = Tokenizer(model_path=tokenizer_path)
    model_args.vocab_size = tokenizer.n_words

    print("Creating transformer...")
    # Build the model with bfloat16 weights (half-precision format usable on CPU).
    torch.set_default_tensor_type(torch.BFloat16Tensor)
    model = Transformer(model_args)

    print("Loading checkpoint to model...", end="")
    _start_time = time.time()
    model.load_state_dict(checkpoint, strict=False)
    print(f"done in {time.time() - _start_time:.2f} seconds")

    _start_time = time.time()
    print("Creating LLaMA generator...", end="")
    generator = LLaMA(model, tokenizer)
    print(f"done in {time.time() - _start_time:.2f} seconds")

    print(f"Loaded in {time.time() - start_time:.2f} seconds")
    return generator
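
# Checkpoint layout expected by load() above (a sketch inferred from the glob and
# params.json reads; the file names are examples, not guaranteed by this repo):
#   <ckpt_dir>/
#     consolidated.00.pth   one *.pth shard per model-parallel rank
#     params.json           hyperparameters merged into ModelArgs
# The tokenizer model file is passed separately via tokenizer_path.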


def main(ckpt_dir: str, tokenizer_path: str, temperature: float = 0.8, top_p: float = 0.95):
    local_rank, world_size = setup_model_parallel()
    if local_rank > 0:
        # Only rank 0 prints; other ranks send their output to /dev/null.
        sys.stdout = open(os.devnull, 'w')

    generator = load(ckpt_dir, tokenizer_path, local_rank, world_size)

    prompts = [input("Enter prompt: ")]
    print("Starting generation with prompt:", prompts[0])

    # Interactive loop: generate a completion, print it, then ask for the next prompt.
    while True:
        start_time = time.time()
        results = generator.generate(
            prompts, max_gen_len=30, temperature=temperature, top_p=top_p)
        print(f"responded in {time.time() - start_time:.2f} seconds")

        for result in results:
            print(result)
            print("\n==================================\n")

        prompts = [input("Enter next prompt: ")]


if __name__ == "__main__":
    fire.Fire(main)
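
# Example invocation (a sketch; the paths are placeholders, not files shipped with the
# repo). fire.Fire exposes main()'s parameters as command-line flags, and the two
# environment variables are read by setup_model_parallel() above:
#   WORLD_SIZE=1 LOCAL_RANK=0 python example.py \
#       --ckpt_dir ../7B --tokenizer_path ../tokenizer.model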