@@ -166,7 +166,7 @@ def from_pretrained_model(huggingface_repo: str) -> NeuralNetworkRegressor: # p
     def fit(
         self,
         train_data: IFT,
-        epoch_size: int = 25,
+        epoch_count: int = 25,
         batch_size: int = 1,
         learning_rate: float = 0.001,
         callback_on_batch_completion: Callable[[int, float], None] | None = None,
@@ -181,7 +181,7 @@ def fit(
         ----------
         train_data:
             The data the network should be trained on.
-        epoch_size:
+        epoch_count:
             The number of times the training cycle should be done.
         batch_size:
             The size of data batches that should be loaded at one time.
@@ -202,7 +202,7 @@ def fit(
         Raises
         ------
         OutOfBoundsError
-            If epoch_size < 1
+            If epoch_count < 1
            If batch_size < 1
         """
         import torch
@@ -218,7 +218,7 @@ def fit(
         if not self._input_conversion._is_fit_data_valid(train_data):
             raise FeatureDataMismatchError

-        _check_bounds("epoch_size", epoch_size, lower_bound=_ClosedBound(1))
+        _check_bounds("epoch_count", epoch_count, lower_bound=_ClosedBound(1))
         _check_bounds("batch_size", batch_size, lower_bound=_ClosedBound(1))

         copied_model = copy.deepcopy(self)
@@ -236,7 +236,7 @@ def fit(
         loss_fn = nn.MSELoss()

         optimizer = torch.optim.SGD(copied_model._model.parameters(), lr=learning_rate)
-        for _ in range(epoch_size):
+        for _ in range(epoch_count):
             loss_sum = 0.0
             amount_of_loss_values_calculated = 0
             for x, y in iter(dataloader):
@@ -273,7 +273,7 @@ def fit(
     #         "median_absolute_deviation",
     #         "coefficient_of_determination",
     #     ],
-    #     epoch_size: int = 25,
+    #     epoch_count: int = 25,
     #     batch_size: int = 1,
     #     learning_rate: float = 0.001,
     # ) -> Self:
@@ -288,7 +288,7 @@ def fit(
     #         The data the network should be trained on.
     #     optimization_metric:
     #         The metric that should be used for determining the performance of a model.
-    #     epoch_size:
+    #     epoch_count:
     #         The number of times the training cycle should be done.
     #     batch_size:
     #         The size of data batches that should be loaded at one time.
@@ -317,7 +317,7 @@ def fit(
     #             "Hyperparameter optimization is currently not supported for CNN Regression Tasks.",
     #         )  # pragma: no cover
     #
-    #     _check_bounds("epoch_size", epoch_size, lower_bound=_ClosedBound(1))
+    #     _check_bounds("epoch_count", epoch_count, lower_bound=_ClosedBound(1))
     #     _check_bounds("batch_size", batch_size, lower_bound=_ClosedBound(1))
     #
     #     list_of_models = self._get_models_for_all_choices()
@@ -334,7 +334,7 @@ def fit(
     #             executor.submit(
     #                 model.fit,
     #                 train_set,  # type: ignore[arg-type]
-    #                 epoch_size,
+    #                 epoch_count,
     #                 batch_size,
     #                 learning_rate,
     #             ),
@@ -774,7 +774,7 @@ def from_pretrained_model(huggingface_repo: str) -> NeuralNetworkClassifier: #
     def fit(
         self,
         train_data: IFT,
-        epoch_size: int = 25,
+        epoch_count: int = 25,
         batch_size: int = 1,
         learning_rate: float = 0.001,
         callback_on_batch_completion: Callable[[int, float], None] | None = None,
@@ -789,7 +789,7 @@ def fit(
         ----------
         train_data:
             The data the network should be trained on.
-        epoch_size:
+        epoch_count:
             The number of times the training cycle should be done.
         batch_size:
             The size of data batches that should be loaded at one time.
@@ -810,7 +810,7 @@ def fit(
         Raises
         ------
         ValueError
-            If epoch_size < 1
+            If epoch_count < 1
            If batch_size < 1
         """
         import torch
@@ -831,7 +831,7 @@ def fit(
         if not self._input_conversion._is_fit_data_valid(train_data):
             raise FeatureDataMismatchError

-        _check_bounds("epoch_size", epoch_size, lower_bound=_ClosedBound(1))
+        _check_bounds("epoch_count", epoch_count, lower_bound=_ClosedBound(1))
         _check_bounds("batch_size", batch_size, lower_bound=_ClosedBound(1))

         copied_model = copy.deepcopy(self)
@@ -856,7 +856,7 @@ def fit(
             loss_fn = nn.BCELoss()

         optimizer = torch.optim.SGD(copied_model._model.parameters(), lr=learning_rate)
-        for _ in range(epoch_size):
+        for _ in range(epoch_count):
             loss_sum = 0.0
             amount_of_loss_values_calculated = 0
             for x, y in iter(dataloader):
@@ -890,7 +890,7 @@ def fit(
     #     train_data: IFT,
     #     optimization_metric: Literal["accuracy", "precision", "recall", "f1_score"],
     #     positive_class: Any = None,
-    #     epoch_size: int = 25,
+    #     epoch_count: int = 25,
     #     batch_size: int = 1,
     #     learning_rate: float = 0.001,
     # ) -> Self:
@@ -907,7 +907,7 @@ def fit(
     #         The metric that should be used for determining the performance of a model.
     #     positive_class:
     #         The class to be considered positive. Only needs to be provided when choosing precision, recall or f1_score as the optimization metric.
-    #     epoch_size:
+    #     epoch_count:
     #         The number of times the training cycle should be done.
     #     batch_size:
     #         The size of data batches that should be loaded at one time.
@@ -936,7 +936,7 @@ def fit(
     #             "Continuous Predictions are currently not supported for Time Series Classification.",
     #         )
     #
-    #     _check_bounds("epoch_size", epoch_size, lower_bound=_ClosedBound(1))
+    #     _check_bounds("epoch_count", epoch_count, lower_bound=_ClosedBound(1))
     #     _check_bounds("batch_size", batch_size, lower_bound=_ClosedBound(1))
     #
     #     list_of_models = self._get_models_for_all_choices()
@@ -956,7 +956,7 @@ def fit(
     #             executor.submit(
     #                 model.fit,
     #                 train_set,  # type: ignore[arg-type]
-    #                 epoch_size,
+    #                 epoch_count,
     #                 batch_size,
     #                 learning_rate,
     #             ),
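
For callers, the practical effect of this rename is that the keyword argument changes from epoch_size to epoch_count, while the default of 25 and the lower bound of 1 stay the same. The following is a minimal stand-in sketch, not Safe-DS code: the fit stub below only mirrors the renamed signature and the bound checks visible in the diff above, and its body is illustrative.

# Hypothetical stand-in for the renamed signature; parameter names and defaults
# are taken from the diff, everything else is illustrative.
def fit(train_data, epoch_count: int = 25, batch_size: int = 1, learning_rate: float = 0.001):
    if epoch_count < 1:
        raise ValueError("epoch_count must be at least 1")  # mirrors _check_bounds(..., _ClosedBound(1))
    if batch_size < 1:
        raise ValueError("batch_size must be at least 1")
    for _ in range(epoch_count):  # the training loop now iterates epoch_count times
        pass


fit(train_data=[], epoch_count=10, batch_size=16)  # new keyword is accepted
# fit(train_data=[], epoch_size=10)  # would now fail: unexpected keyword argument 'epoch_size'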