-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathLinearRegression.cpp
More file actions
63 lines (56 loc) · 2.48 KB
/
LinearRegression.cpp
File metadata and controls
63 lines (56 loc) · 2.48 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
// Created by mahit on 10/27/2025.
#include <cmath>
#include "LinearRegression.h"
using namespace std;
void LinearRegression::trainLinearRegression(vector<stockDataPoint> trainingData, double learningRate, int maxIterations) {
    // Fits the model's weights (no bias/intercept term) with full-batch
    // gradient descent on the half-MSE loss.
    //   trainingData  : samples; each carries input features and a target value
    //   learningRate  : gradient step size
    //   maxIterations : fixed number of full passes over the data
    if (trainingData.empty()) {
        return;
    }
    // Number of feature categories per sample: open, high, close, low, etc.
    const size_t inputVariables = trainingData[0].stockInputCategories.size();
    // assign (not resize): a re-train must start from a clean all-zero weight
    // vector; resize would keep stale weights from a previous fit.
    weights.assign(inputVariables, 0.0);
    const size_t sampleCount = trainingData.size();
    // Allocated once and overwritten each epoch instead of clear()+push_back.
    vector<double> yPredictions(sampleCount);
    for (int iter = 0; iter < maxIterations; iter++) {
        // Forward pass: prediction for every sample under the current weights.
        for (size_t j = 0; j < sampleCount; j++) {
            yPredictions[j] = predict(trainingData[j].stockInputCategories);
        }
        // Simultaneous update: every weight steps from the same snapshot of
        // the previous iteration's weights.
        const vector<double> oldWeights(weights);
        for (size_t k = 0; k < weights.size(); k++) {
            double sum = 0.0;
            for (size_t j = 0; j < sampleCount; j++) {
                // dLoss/dw_k contribution: (prediction - target) scaled by
                // the sample's k-th input feature.
                sum += (yPredictions[j] - trainingData[j].target) * trainingData[j].stockInputCategories[k];
            }
            // Average the gradient over the batch, then take one step.
            weights[k] = oldWeights[k] - (learningRate / sampleCount) * sum;
        }
    }
}
// Dot product of the trained weight vector with the supplied feature vector.
// Returns 0.0 when the feature count does not match the trained weights
// (e.g. the model has not been trained for this input shape).
double LinearRegression::predict(vector<double> features) {
    if (features.size() != weights.size()) {
        return 0.0; // Return 0 if model is not trained
    }
    double dotProduct = 0.0;
    size_t idx = 0;
    for (const double w : weights) {
        dotProduct += w * features[idx];
        ++idx;
    }
    return dotProduct;
}
double LinearRegression::MSE(vector<double> predictions, vector<double> actual) {
    // Half mean squared error: sum((pred - actual)^2) / (2m).
    // The 1/(2m) convention matches the gradient step used in training.
    // Returns 0.0 for empty or mismatched inputs instead of dividing by zero
    // or reading past the end of the shorter vector.
    const size_t m = actual.size();
    if (m == 0 || predictions.size() != m) {
        return 0.0; // loss undefined: no data or misaligned vectors
    }
    double loss = 0.0;
    for (size_t i = 0; i < m; i++) {
        // diff * diff instead of pow(diff, 2): exact and far cheaper than a
        // general transcendental call.
        const double diff = predictions[i] - actual[i];
        loss += diff * diff;
    }
    return loss / (2.0 * m);
}