logistic_regression_suv_prediction.py
# importing libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from matplotlib.colors import ListedColormap
from sklearn.metrics import confusion_matrix
import seaborn as sns
# loading the dataset from CSV
dataset = pd.read_csv('SocialNetworkAds.csv')
print(dataset)
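# NOTE (assumption): the positional indexing below relies on the usual Social
# Network Ads column order, i.e. something like
# [User ID, Gender, Age, EstimatedSalary, Purchased]; adjust the indices if the
# CSV is laid out differently.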
# extracting the independent variables (columns 2 and 3: Age and Estimated Salary)
X = dataset.iloc[:, [2, 3]].values
print(X)
# extracting the dependent variable (column 4: the purchase label)
Y = dataset.iloc[:, 4].values
print(Y)
# visualizing feature correlations before preprocessing
# (numeric_only skips any non-numeric columns, and plt.show() keeps this figure
# separate from the decision-region plots drawn later)
sns.heatmap(dataset.corr(numeric_only=True))
plt.show()
# splitting the dataset into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=1/3, random_state=0)
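# test_size=1/3 holds out a third of the rows for evaluation, and random_state=0
# makes the split reproducible across runs.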
# feature scaling: standardize Age and Estimated Salary so both contribute comparably
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
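# The scaler is fit on the training data only and then applied to the test data,
# so no information from the test set leaks into preprocessing.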
# fitting a logistic regression classifier to the training set
log_reg = LogisticRegression(random_state=0)
log_reg.fit(X_train, y_train)
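# Quick sanity check (a sketch, not required for the predictions below): the
# fitted model exposes its learned weights; on standardized features their
# relative magnitudes hint at how strongly each feature drives the prediction.
print('coefficients (Age, Estimated Salary):', log_reg.coef_)
print('intercept:', log_reg.intercept_)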
# predicting the test set results
y_pred = log_reg.predict(X_test)
print(y_pred)
print(y_test)
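# Optional check (sketch): LogisticRegression.score() returns the mean accuracy
# on the given data, so this should match the accuracy derived from the
# confusion matrix further below.
print('test accuracy:', log_reg.score(X_test, y_test))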
# visualizing the training set results: the meshgrid covers the scaled feature
# plane, and contourf colors each grid point by the class the model predicts,
# which draws the decision regions behind the actual training points
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start=X_set[:, 0].min() - 1, stop=X_set[:, 0].max() + 1, step=0.01),
                     np.arange(start=X_set[:, 1].min() - 1, stop=X_set[:, 1].max() + 1, step=0.01))
plt.contourf(X1, X2, log_reg.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha=0.75, cmap=ListedColormap(('blue', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c=ListedColormap(['blue', 'green'])(i), label=j)
plt.title('Logistic Regression (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# visualizing the test set results against the same decision regions
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start=X_set[:, 0].min() - 1, stop=X_set[:, 0].max() + 1, step=0.01),
                     np.arange(start=X_set[:, 1].min() - 1, stop=X_set[:, 1].max() + 1, step=0.01))
plt.contourf(X1, X2, log_reg.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha=0.75, cmap=ListedColormap(('blue', 'red')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c=ListedColormap(['blue', 'red'])(i), label=j)
plt.title('Logistic Regression (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# evaluating the model with a confusion matrix
# (rows are true classes, columns are predicted classes)
conf_matrix = confusion_matrix(y_test, y_pred)
print(conf_matrix)
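# Optional visual of the same matrix (a sketch using the already-imported
# seaborn and matplotlib; annot/fmt simply print the raw counts in each cell).
sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues')
plt.xlabel('Predicted label')
plt.ylabel('True label')
plt.title('Confusion matrix')
plt.show()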
# accuracy: correctly classified test samples (the diagonal of the confusion matrix)
accuracy = conf_matrix.trace() / len(y_test)
print(accuracy)
# misclassification rate: incorrectly classified test samples (the off-diagonal entries)
mis_cla_rate = (conf_matrix.sum() - conf_matrix.trace()) / len(y_test)
print(mis_cla_rate)
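# Cross-check (a sketch, not part of the original script): scikit-learn's
# accuracy_score and classification_report give the same accuracy plus
# per-class precision, recall and F1.
from sklearn.metrics import accuracy_score, classification_report
print('accuracy_score:', accuracy_score(y_test, y_pred))
print(classification_report(y_test, y_pred))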