q1_softmax.py
import numpy as np
import tensorflow as tf
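
# Note: this file is written against the TensorFlow 1.x API (tf.Session,
# reduction_indices, tf.to_float, tf.log). If only TensorFlow 2.x is installed,
# one possible workaround (not part of the original assignment) is to import
# the compatibility layer instead:
#
#     import tensorflow.compat.v1 as tf
#     tf.disable_v2_behavior()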

def softmax(x):
    """
    Compute the softmax function in tensorflow.

    You might find the tensorflow functions tf.exp, tf.reduce_max,
    tf.reduce_sum, tf.expand_dims useful. (Many solutions are possible, so you
    may not need to use all of these functions.) Recall also that many common
    tensorflow operations are sugared (e.g. x * y does a tensor multiplication
    if x and y are both tensors). Make sure to implement the numerical
    stability fixes as in the previous homework!

    Args:
      x: tf.Tensor with shape (n_samples, n_features). Note that feature
         vectors are represented as row vectors. (For simplicity, there is no
         need to handle 1-d input as in the previous homework.)
    Returns:
      out: tf.Tensor with shape (n_samples, n_features). You need to construct
           this tensor in this problem.
    """
    ### YOUR CODE HERE
    # Subtract the per-row maximum before exponentiating for numerical stability.
    maxes = tf.expand_dims(tf.reduce_max(x, reduction_indices=[1]), 1)
    x_red = x - maxes
    x_exp = tf.exp(x_red)
    # Normalize each row so that it sums to 1.
    sums = tf.expand_dims(tf.reduce_sum(x_exp, reduction_indices=[1]), 1)
    out = x_exp / sums
    ### END YOUR CODE
    return out
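
# For intuition, here is a NumPy sketch of the same numerically stable softmax.
# It is not part of the original assignment; the helper name softmax_np is
# introduced here only as a cross-check for the TensorFlow version above.
def softmax_np(x):
    """Row-wise softmax of a 2-d NumPy array, using the max-subtraction trick."""
    shifted = x - np.max(x, axis=1, keepdims=True)  # subtract per-row max for stability
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=1, keepdims=True)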

def cross_entropy_loss(y, yhat):
    """
    Compute the cross entropy loss in tensorflow.

    y is a one-hot tensor of shape (n_samples, n_classes) and yhat is a tensor
    of shape (n_samples, n_classes). y should be of dtype tf.int32, and yhat
    should be of dtype tf.float32.

    The functions tf.to_float, tf.reduce_sum, and tf.log might prove useful.
    (Many solutions are possible, so you may not need to use all of these
    functions.)

    Note: You are NOT allowed to use the tensorflow built-in cross-entropy
    functions.

    Args:
      y: tf.Tensor with shape (n_samples, n_classes). One-hot encoded.
      yhat: tf.Tensor with shape (n_samples, n_classes). Each row encodes a
            probability distribution and should sum to 1.
    Returns:
      out: tf.Tensor with shape (1,) (scalar output). You need to construct
           this tensor in the problem.
    """
    ### YOUR CODE HERE
    # Cast the one-hot labels to float, then sum -y * log(yhat) over all entries.
    y = tf.to_float(y)
    out = -tf.reduce_sum(y * tf.log(yhat))
    ### END YOUR CODE
    return out
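
# A NumPy counterpart of the loss above, again only an illustrative sketch
# (the name cross_entropy_np is introduced here, not taken from the assignment).
# It computes CE(y, yhat) = -sum over all i, c of y[i, c] * log(yhat[i, c]).
def cross_entropy_np(y, yhat):
    """Total cross-entropy over all rows; y is one-hot, each row of yhat sums to 1."""
    return -np.sum(y.astype(np.float64) * np.log(yhat))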

def test_softmax_basic():
    """
    Some simple tests to get you started.
    Warning: these are not exhaustive.
    """
    print("Running basic tests...")
    test1 = softmax(tf.convert_to_tensor(
        np.array([[1001, 1002], [3, 4]]), dtype=tf.float32))
    with tf.Session():
        test1 = test1.eval()
    assert np.amax(np.fabs(test1 - np.array(
        [0.26894142, 0.73105858]))) <= 1e-6

    test2 = softmax(tf.convert_to_tensor(
        np.array([[-1001, -1002]]), dtype=tf.float32))
    with tf.Session():
        test2 = test2.eval()
    assert np.amax(np.fabs(test2 - np.array(
        [0.73105858, 0.26894142]))) <= 1e-6

    print("Basic (non-exhaustive) softmax tests pass\n")

def test_cross_entropy_loss_basic():
    """
    Some simple tests to get you started.
    Warning: these are not exhaustive.
    """
    y = np.array([[0, 1], [1, 0], [1, 0]])
    yhat = np.array([[.5, .5], [.5, .5], [.5, .5]])

    test1 = cross_entropy_loss(
        tf.convert_to_tensor(y, dtype=tf.int32),
        tf.convert_to_tensor(yhat, dtype=tf.float32))
    with tf.Session():
        test1 = test1.eval()
    result = -3 * np.log(.5)
    assert np.amax(np.fabs(test1 - result)) <= 1e-6
    print("Basic (non-exhaustive) cross-entropy tests pass\n")

if __name__ == "__main__":
    test_softmax_basic()
    test_cross_entropy_loss_basic()