-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmemory.py
52 lines (45 loc) · 1.47 KB
/
memory.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
import numpy as np
class ReplayBuffer:
    """Transition buffer for on-policy training.

    Stores (state, next state, action, prob, reward, done) tuples in
    parallel Python lists and produces shuffled mini-batch index arrays.
    """

    def __init__(self, batch_size):
        """Create an empty buffer.

        Args:
            batch_size: number of samples per mini-batch returned by
                ``generate_batches``.
        """
        self.batch_size = batch_size
        # Delegate to clear_memory so the "empty buffer" state is
        # defined in exactly one place.
        self.clear_memory()

    def recall(self):
        """Return all stored transitions as NumPy arrays.

        Returns:
            Tuple ``(states, new_states, actions, probs, rewards, dones)``.
            NOTE: this order differs from ``store_memory``'s argument
            order — ``new_states`` comes second here.
        """
        return (
            np.array(self.states),
            np.array(self.new_states),
            np.array(self.actions),
            np.array(self.probs),
            np.array(self.rewards),
            np.array(self.dones),
        )

    def generate_batches(self):
        """Return shuffled index arrays, one per *full* mini-batch.

        The trailing ``len(self.states) % batch_size`` samples are
        dropped so every batch has exactly ``batch_size`` elements; if
        fewer than ``batch_size`` samples are stored, the result is an
        empty list.
        """
        n_states = len(self.states)
        n_batches = n_states // self.batch_size  # floor div is already int
        indices = np.arange(n_states, dtype=np.int64)
        np.random.shuffle(indices)  # in-place shuffle; randomizes batch membership
        return [
            indices[i * self.batch_size : (i + 1) * self.batch_size]
            for i in range(n_batches)
        ]

    def store_memory(self, state, state_, action, probs, reward, done):
        """Append one transition to the buffer."""
        self.states.append(state)
        self.actions.append(action)
        self.probs.append(probs)
        self.rewards.append(reward)
        self.dones.append(done)
        self.new_states.append(state_)

    def clear_memory(self):
        """Discard all stored transitions."""
        self.states = []
        self.probs = []
        self.actions = []
        self.rewards = []
        self.dones = []
        self.new_states = []