dataset.py
import os
import json

import torch
from torch.utils.data import Dataset

from utils import add_special_tokens
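
# `add_special_tokens` lives in utils.py (not shown here); it is assumed to
# return a GPT2Tokenizer with pad and sep tokens registered, roughly along
# these lines (the '<|pad|>' / '<|sep|>' literals are an assumption):
#
#     from transformers import GPT2Tokenizer
#
#     def add_special_tokens():
#         tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
#         tokenizer.add_special_tokens(
#             {'pad_token': '<|pad|>', 'sep_token': '<|sep|>'})
#         return tokenizer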

class GPT21024Dataset(Dataset):
    """Serves tokenized article/abstract pairs (one JSON file per sample),
    packed into fixed 1024-token sequences for GPT-2."""

    def __init__(self, root_dir, ids_file, mode='train', length=None):
        self.root_dir = root_dir
        self.tokenizer = add_special_tokens()
        # ids_file is retained for backward compatibility; samples are
        # discovered by listing root_dir directly
        self.idxs = os.listdir(root_dir)
        self.mode = mode
        if length is None:
            self.len = len(self.idxs)
        else:
            self.len = length

    def __len__(self):
        return self.len

    def __getitem__(self, idx):
        # map the requested index into the file list: validation samples are
        # taken from the end, test samples from just before them (this
        # assumes the valid and test sets are the same size)
        if self.mode == 'valid':
            idx = self.idxs[-idx]
        elif self.mode == 'test':
            idx = self.idxs[-idx - self.len]
        else:
            idx = self.idxs[idx]
        file_name = os.path.join(self.root_dir, str(idx))
        with open(file_name, 'r') as f:
            data = json.load(f)
        # start from all-pad buffers: 1024 tokens total, 901 reserved for the
        # article (assumes articles fit in 901 tokens and article + abstract
        # fit in 1024)
        text = self.tokenizer.encode(self.tokenizer.pad_token) * 1024
        post = self.tokenizer.encode(self.tokenizer.pad_token) * 901
        post[:len(data['article'])] = data['article']
        # final layout: [article + pad] <sep> [abstract] <eos> [pad]
        content = (post
                   + self.tokenizer.encode(self.tokenizer.sep_token)
                   + data['abstract']
                   + self.tokenizer.encode(self.tokenizer.eos_token))
        text[:len(content)] = content
        text = torch.tensor(text)
        # sum_idx marks the <sep> position separating article from abstract
        sample = {'article': text, 'sum_idx': len(post)}
        return sample
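

if __name__ == '__main__':
    # minimal usage sketch: the './gpt2_1024_data' directory and the
    # 'ids.json' argument are placeholders, and it assumes each JSON file
    # holds already-tokenized 'article' and 'abstract' token-id lists
    from torch.utils.data import DataLoader

    train_data = GPT21024Dataset('./gpt2_1024_data', 'ids.json', mode='train')
    loader = DataLoader(train_data, batch_size=1, shuffle=True)
    batch = next(iter(loader))
    print(batch['article'].shape)  # torch.Size([1, 1024])
    print(batch['sum_idx'])        # tensor([901]): index of the <sep> token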