-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathregression_example.py
More file actions
108 lines (82 loc) · 3.42 KB
/
regression_example.py
File metadata and controls
108 lines (82 loc) · 3.42 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
#!/usr/bin/env python3
"""
KortexDL Regression - California Housing (Stable)
==================================================
Stable training with conservative hyperparameters.
Usage:
python regression_example.py
"""
import numpy as np
import kortexdl as bd
try:
from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
SKLEARN_AVAILABLE = True
except ImportError:
SKLEARN_AVAILABLE = False
def main():
    """Train a small KortexDL network on the California Housing dataset.

    Loads a 3000-sample subset, standardizes features and target, trains a
    8 -> 32 -> 16 -> 1 Tanh network with full-batch MSE updates, and reports
    R²/MSE plus a few de-normalized sample predictions.

    Returns:
        int: 0 on success, 1 if scikit-learn is not installed.
    """
    print("🎯 KortexDL Regression - California Housing")
    print("=" * 60)

    if not SKLEARN_AVAILABLE:
        print("❌ sklearn required: pip install scikit-learn")
        return 1

    # Load dataset
    print("\n📁 Loading California Housing dataset...")
    data = fetch_california_housing()
    X, y = data.data, data.target

    # Use a fixed 3000-sample subset to keep full-batch training fast.
    X = X[:3000].astype(np.float32)
    y = y[:3000].reshape(-1, 1).astype(np.float32)
    print(f"✅ Dataset: {len(X)} samples, {X.shape[1]} features")
    print(f"✅ Target: Median house value (in $100k)")

    # Normalize features AND target — target scaling keeps MSE gradients
    # well-conditioned for the Tanh network.
    scaler_X = StandardScaler()
    scaler_y = StandardScaler()
    X = scaler_X.fit_transform(X).astype(np.float32)
    y = scaler_y.fit_transform(y).astype(np.float32)

    # Split (fixed seed for reproducibility)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    print(f"✅ Split: {len(X_train)} train, {len(X_test)} test")

    # Simple network
    print("\n🧠 Creating network...")
    net = bd.Network([8, 32, 16, 1], bd.ActivationType.Tanh)
    print("✅ Network: 8 -> 32 -> 16 -> 1 (Tanh)")

    # Full batch, moderate LR
    print("\n🏋️ Training...")
    epochs = 1000
    learning_rate = 0.01  # Reduced from 0.1 for better stability
    X_flat = X_train.flatten().tolist()
    y_flat = y_train.flatten().tolist()

    def _predict(samples):
        """Run inference on each row of *samples*, returning scalar outputs.

        NOTE(review): assumes net.forward(inputs, n_outputs, training) returns
        a sequence whose first element is the prediction — confirm in kortexdl.
        """
        return [net.forward(row.tolist(), 1, False)[0] for row in samples]

    # -inf is a safe lower bound for R² (was an arbitrary -999 sentinel).
    best_r2 = float("-inf")
    for epoch in range(epochs):
        loss = net.train_batch(X_flat, y_flat, bd.LossType.MSE, learning_rate, len(X_train))
        if epoch % 100 == 0:
            # Evaluate on the held-out set every 100 epochs.
            predictions = _predict(X_test)
            r2 = bd.compute_r2_score(y_test.flatten().tolist(), predictions)
            best_r2 = max(best_r2, r2)
            print(f" Epoch {epoch:3d}: Loss = {loss:.4f}, R² = {r2:.4f}")
    print(f"\n✅ Training complete! Best R²: {best_r2:.4f}")

    # Final evaluation (metrics are computed on the normalized scale).
    print("\n📈 Final Evaluation...")
    predictions = np.array(_predict(X_test))

    # Inverse transform only for the human-readable sample report below.
    y_test_orig = scaler_y.inverse_transform(y_test)
    pred_orig = scaler_y.inverse_transform(predictions.reshape(-1, 1))

    mse = bd.compute_mse(y_test.flatten().tolist(), predictions.tolist())
    r2 = bd.compute_r2_score(y_test.flatten().tolist(), predictions.tolist())
    print(f"✅ Test MSE: {mse:.4f}")
    print(f"✅ Test R²: {r2:.4f}")

    # Sample predictions, de-normalized back to $100k units.
    print("\n🔍 Sample Predictions (in $100k):")
    for i in range(5):
        true_val = y_test_orig[i][0]
        pred_val = pred_orig[i][0]
        error = abs(true_val - pred_val)
        print(f" True: ${true_val*100:.0f}k Pred: ${pred_val*100:.0f}k Error: ${error*100:.0f}k")

    print("\n" + "=" * 60)
    print("✅ Complete!")
    return 0
if __name__ == "__main__":
    # Propagate main()'s status code to the shell. SystemExit is used instead
    # of the site-module exit() builtin, which is not guaranteed to exist
    # (e.g. under `python -S` or in frozen/embedded interpreters).
    raise SystemExit(main())