logistic_regression.py
# Logistic Regression Model
# import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# import ML libraries
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score
from matplotlib.colors import ListedColormap
# load dataset: all columns except the last are features, the last column is the binary target
dataset = pd.read_csv("Social_Network_Ads.csv")
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
# splitting the dataset into the training set and test set
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=0
)
# feature scaling: fit the scaler on the training set only,
# then apply the same transformation to the test set
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# training logistic regression model
classifier = LogisticRegression(random_state=0)
classifier.fit(X_train, y_train)
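# optional sketch: inspect the fitted parameters; logistic regression models the
# log-odds of the positive class as a linear function of the scaled features
print("coefficients:", classifier.coef_)
print("intercept:", classifier.intercept_)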
# predicting a new result (e.g. age 30 with an estimated salary of 87,000)
print(classifier.predict(sc.transform([[30, 87000]])))
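# optional sketch: predict_proba exposes the class probabilities behind the
# hard 0/1 prediction above (column 1 is the probability of class 1)
print(classifier.predict_proba(sc.transform([[30, 87000]])))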
# predicting the test set results
y_pred = classifier.predict(X_test)
# print predictions side by side with the true labels
print(
    np.concatenate((y_pred.reshape(len(y_pred), 1), y_test.reshape(len(y_test), 1)), 1)
)
# confusion matrix and accuracy score on the test set
cm = confusion_matrix(y_test, y_pred)
print(cm)
print(accuracy_score(y_test, y_pred))
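# optional sketch: classification_report adds per-class precision, recall and
# F1 to the single accuracy number
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))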
# visualize training set results: a dense grid of (age, salary) points is
# classified and drawn with contourf to show the decision regions
X_set, y_set = sc.inverse_transform(X_train), y_train
X1, X2 = np.meshgrid(
    np.arange(start=X_set[:, 0].min() - 10, stop=X_set[:, 0].max() + 10, step=0.25),
    np.arange(start=X_set[:, 1].min() - 1000, stop=X_set[:, 1].max() + 1000, step=0.25),
)
plt.contourf(
    X1,
    X2,
    classifier.predict(sc.transform(np.array([X1.ravel(), X2.ravel()]).T)).reshape(
        X1.shape
    ),
    alpha=0.75,
    cmap=ListedColormap(("salmon", "dodgerblue")),
)
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
# scatter the actual training points, colored by their true class
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(
        X_set[y_set == j, 0],
        X_set[y_set == j, 1],
        c=ListedColormap(("salmon", "dodgerblue"))(i),
        label=j,
    )
plt.title("Logistic Regression (Training set)")
plt.xlabel("Age")
plt.ylabel("Estimated Salary")
plt.legend()
plt.show()
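# note: the decision boundary appears as a straight line because logistic
# regression is a linear classifier in the (scaled) feature space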
# visualize test set results
X_set, y_set = sc.inverse_transform(X_test), y_test
X1, X2 = np.meshgrid(
    np.arange(start=X_set[:, 0].min() - 10, stop=X_set[:, 0].max() + 10, step=0.25),
    np.arange(start=X_set[:, 1].min() - 1000, stop=X_set[:, 1].max() + 1000, step=0.25),
)
plt.contourf(
    X1,
    X2,
    classifier.predict(sc.transform(np.array([X1.ravel(), X2.ravel()]).T)).reshape(
        X1.shape
    ),
    alpha=0.75,
    cmap=ListedColormap(("salmon", "dodgerblue")),
)
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(
        X_set[y_set == j, 0],
        X_set[y_set == j, 1],
        c=ListedColormap(("salmon", "dodgerblue"))(i),
        label=j,
    )
plt.title("Logistic Regression (Test set)")
plt.xlabel("Age")
plt.ylabel("Estimated Salary")
plt.legend()
plt.show()
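# optional sketch: 10-fold cross-validation on the (already scaled) training set
# gives an accuracy estimate that is less dependent on this particular train/test split
from sklearn.model_selection import cross_val_score
scores = cross_val_score(estimator=classifier, X=X_train, y=y_train, cv=10)
print("cv accuracy: {:.2f} % (std: {:.2f} %)".format(scores.mean() * 100, scores.std() * 100))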