-
Notifications
You must be signed in to change notification settings - Fork 20
/
utils_sample.py
73 lines (59 loc) · 2.59 KB
/
utils_sample.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
import torch
import random
import numpy as np
def set_seed(args):
    """Seed Python, NumPy, and PyTorch RNGs from ``args.seed`` for reproducibility.

    When ``args.n_gpu`` > 0 and CUDA is available, all CUDA devices are
    seeded as well.
    """
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0 and torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
def collate_fn_sample(batch):
    """Collate a batch of features for training, sub-sampling negative pairs.

    Each feature dict must provide ``input_ids``, ``entity_pos``, and the
    pre-split pair lists ``pos_hts``/``pos_labels`` and ``neg_hts``/``neg_labels``.
    Sequences are right-padded with 0 to the batch max length; for each
    document the negatives are shuffled and capped at
    ``max(20, len(pos_hts) * negative_alpha)``.

    Returns a tuple ``(input_ids, input_mask, labels, entity_pos, hts)`` where
    ``input_ids`` is a long tensor, ``input_mask`` a float tensor, and the
    remaining entries are per-document Python lists.
    """
    max_len = max(len(f["input_ids"]) for f in batch)
    input_ids = [f["input_ids"] + [0] * (max_len - len(f["input_ids"])) for f in batch]
    input_mask = [[1.0] * len(f["input_ids"]) + [0.0] * (max_len - len(f["input_ids"])) for f in batch]
    input_ids = torch.tensor(input_ids, dtype=torch.long)
    input_mask = torch.tensor(input_mask, dtype=torch.float)
    entity_pos = [f["entity_pos"] for f in batch]
    negative_alpha = 8   # keep up to this many negatives per positive (floor of 20)
    positive_alpha = 1   # repeat each positive pair this many times
    labels, hts = [], []
    for f in batch:
        pos_hts = f["pos_hts"]
        pos_labels = f["pos_labels"]
        neg_hts = f["neg_hts"]
        neg_labels = f["neg_labels"]
        if negative_alpha > 0:
            # Shuffle negatives as (ht, label) pairs so alignment is kept.
            # Shuffling a copy avoids mutating the dataset's feature dicts,
            # and a single shuffle avoids reseeding the global RNG (the old
            # double random.seed(randnum) trick clobbered global RNG state).
            paired = list(zip(neg_hts, neg_labels))
            random.shuffle(paired)
            neg_cap = max(20, len(pos_hts) * negative_alpha)
            paired = paired[:neg_cap]
            neg_hts = [ht for ht, _ in paired]
            neg_labels = [lab for _, lab in paired]
        hts.append(pos_hts * positive_alpha + neg_hts)
        labels.append(pos_labels * positive_alpha + neg_labels)
    output = (input_ids, input_mask, labels, entity_pos, hts, )
    return output
def collate_fn(batch):
    """Collate a batch of features for evaluation (no negative sampling).

    Right-pads every ``input_ids`` sequence with 0 to the batch max length and
    builds the matching 1.0/0.0 attention mask. Returns a tuple
    ``(input_ids, input_mask, labels, entity_pos, hts)`` where the first two
    entries are tensors and the rest are per-document Python lists.
    """
    target_len = max(len(f["input_ids"]) for f in batch)
    padded, masks = [], []
    for f in batch:
        seq = f["input_ids"]
        pad = target_len - len(seq)
        padded.append(seq + [0] * pad)
        masks.append([1.0] * len(seq) + [0.0] * pad)
    input_ids = torch.tensor(padded, dtype=torch.long)
    input_mask = torch.tensor(masks, dtype=torch.float)
    entity_pos = [f["entity_pos"] for f in batch]
    labels = [f["labels"] for f in batch]
    hts = [f["hts"] for f in batch]
    return (input_ids, input_mask, labels, entity_pos, hts)