# decision_tree.py
import numpy as np
from collections import Counter


class Node:
    """A single node of the tree: either an internal split or a leaf."""

    def __init__(self, feature=None, threshold=None, left=None, right=None, *, value=None):
        self.feature = feature      # index of the feature this node splits on
        self.threshold = threshold  # split threshold for that feature
        self.left = left            # subtree for samples with feature <= threshold
        self.right = right          # subtree for samples with feature > threshold
        self.value = value          # predicted label (set only for leaf nodes)

    def is_leaf_node(self):
        return self.value is not None
class DecisionTree:
    """A CART-style decision tree classifier that splits on information gain."""

    def __init__(self, min_samples_split=2, max_depth=1000, n_features=None):
        self.min_samples_split = min_samples_split
        self.max_depth = max_depth
        self.n_features = n_features  # number of features considered per split
        self.root = None

    def fit(self, X, y):
        # Use all features unless a smaller subset size was requested.
        self.n_features = X.shape[1] if not self.n_features else min(X.shape[1], self.n_features)
        self.root = self._grow_tree(X, y)
    def _grow_tree(self, X, y, depth=0):
        n_samples, n_feats = X.shape
        n_labels = len(np.unique(y))

        # Stopping criteria: max depth reached, node is pure, or too few samples.
        if depth >= self.max_depth or n_labels == 1 or n_samples < self.min_samples_split:
            leaf_value = self._most_common_label(y)
            return Node(value=leaf_value)

        # Search a random subset of features (all of them unless n_features was set).
        feat_idxs = np.random.choice(n_feats, self.n_features, replace=False)
        best_feature, best_thresh = self._best_split(X, y, feat_idxs)

        # Fall back to a leaf if no valid split was found.
        if best_feature is None or best_thresh is None:
            leaf_value = self._most_common_label(y)
            return Node(value=leaf_value)

        left_idxs, right_idxs = self._split(X[:, best_feature], best_thresh)

        # Safeguard against degenerate splits that leave one side empty.
        if len(left_idxs) == 0 or len(right_idxs) == 0:
            leaf_value = self._most_common_label(y)
            return Node(value=leaf_value)

        left = self._grow_tree(X[left_idxs, :], y[left_idxs], depth + 1)
        right = self._grow_tree(X[right_idxs, :], y[right_idxs], depth + 1)
        return Node(best_feature, best_thresh, left, right)
    def _most_common_label(self, y):
        # Majority vote among the labels at this node.
        counter = Counter(y)
        return counter.most_common(1)[0][0]
    def _best_split(self, X, y, feat_idxs):
        # Exhaustively search the candidate features and thresholds for the
        # split with the highest information gain.
        best_gain = -1
        split_idx, split_threshold = None, None
        for feat_idx in feat_idxs:
            X_column = X[:, feat_idx]
            thresholds = np.unique(X_column)
            for thr in thresholds:
                gain = self._information_gain(y, X_column, thr)
                if gain > best_gain:
                    best_gain = gain
                    split_idx = feat_idx
                    split_threshold = thr
        return split_idx, split_threshold
    def _information_gain(self, y, X_column, threshold):
        # Information gain = entropy(parent) - weighted average entropy(children).
        parent_entropy = self._entropy(y)

        left_idxs, right_idxs = self._split(X_column, threshold)
        if len(left_idxs) == 0 or len(right_idxs) == 0:
            return 0

        n = len(y)
        n_left, n_right = len(left_idxs), len(right_idxs)
        entropy_left, entropy_right = self._entropy(y[left_idxs]), self._entropy(y[right_idxs])
        child_entropy = (n_left / n) * entropy_left + (n_right / n) * entropy_right
        return parent_entropy - child_entropy
    def _entropy(self, y):
        # Shannon entropy; np.bincount assumes non-negative integer labels.
        # The log base only rescales the gain, so it does not change which
        # split wins the comparison in _best_split.
        ps = np.bincount(y) / len(y)
        ps = ps[ps > 0]
        return -np.sum(ps * np.log(ps))
    def _split(self, X_column, split_thresh):
        # Indices of samples routed to the left (<= threshold) and right (>) children.
        left_idxs = np.argwhere(X_column <= split_thresh).flatten()
        right_idxs = np.argwhere(X_column > split_thresh).flatten()
        return left_idxs, right_idxs
    def predict(self, X):
        # Route every sample from the root down to a leaf and collect the labels.
        return np.array([self._traverse_tree(x, self.root) for x in X])

    def _traverse_tree(self, x, node):
        if node.is_leaf_node():
            return node.value
        if x[node.feature] <= node.threshold:
            return self._traverse_tree(x, node.left)
        return self._traverse_tree(x, node.right)
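

# Usage sketch (an addition, not part of the original file): a minimal example
# assuming scikit-learn is available for a toy dataset and a train/test split;
# the DecisionTree class itself depends only on numpy.
if __name__ == "__main__":
    from sklearn import datasets
    from sklearn.model_selection import train_test_split

    data = datasets.load_breast_cancer()
    X, y = data.data, data.target
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=1234
    )

    clf = DecisionTree(max_depth=10)
    clf.fit(X_train, y_train)
    predictions = clf.predict(X_test)

    # Simple accuracy check on the held-out split.
    accuracy = np.sum(y_test == predictions) / len(y_test)
    print(f"Accuracy: {accuracy:.4f}")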