Commit fa62e9b

feat: rename epoch_size to epoch_count (#962)
### Summary of Changes

In the `fit` methods of `NeuralNetworkRegressor` and `NeuralNetworkClassifier`, rename the parameter `epoch_size` to `epoch_count`. The previous name was misleading, since an epoch has no size.

---------

Co-authored-by: megalinter-bot <129584137+megalinter-bot@users.noreply.github.com>
1 parent afafd43 commit fa62e9b
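
For call sites, this is a pure keyword rename with unchanged defaults and behavior. A minimal before/after sketch (the `model` and `dataset` names are placeholders, not taken from this commit):

    # before this commit (no longer accepted):
    # fitted = model.fit(dataset, epoch_size=8, batch_size=16)

    # after this commit:
    fitted = model.fit(dataset, epoch_count=8, batch_size=16)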

File tree

8 files changed, +56 -56 lines changed


docs/tutorials/convolutional_neural_network_for_image_classification.ipynb

Lines changed: 2 additions & 2 deletions
@@ -197,11 +197,11 @@
    "id": "3d8efa74951725cb"
   },
   {
-   "cell_type": "code",
-   "source": "cnn_fitted = cnn.fit(dataset, epoch_size=8, batch_size=16)",
    "metadata": {
     "collapsed": false
    },
+   "cell_type": "code",
+   "source": "cnn_fitted = cnn.fit(dataset, epoch_count=8, batch_size=16)",
    "id": "381627a94d500675",
    "outputs": [],
    "execution_count": null

docs/tutorials/time_series_forecasting.ipynb

Lines changed: 1 addition & 1 deletion
@@ -205,7 +205,7 @@
    " forecast_horizon=1,\n",
    " continuous=False,\n",
    " extra_names= [\"date\"]\n",
-   "), epoch_size=25)"
+   "), epoch_count=25)"
    ],
    "metadata": {
    "collapsed": false,

src/safeds/ml/nn/_model.py

Lines changed: 18 additions & 18 deletions
@@ -166,7 +166,7 @@ def from_pretrained_model(huggingface_repo: str) -> NeuralNetworkRegressor: # p
     def fit(
         self,
         train_data: IFT,
-        epoch_size: int = 25,
+        epoch_count: int = 25,
         batch_size: int = 1,
         learning_rate: float = 0.001,
         callback_on_batch_completion: Callable[[int, float], None] | None = None,
@@ -181,7 +181,7 @@ def fit(
         ----------
         train_data:
             The data the network should be trained on.
-        epoch_size:
+        epoch_count:
             The number of times the training cycle should be done.
         batch_size:
             The size of data batches that should be loaded at one time.
@@ -202,7 +202,7 @@ def fit(
         Raises
         ------
         OutOfBoundsError
-            If epoch_size < 1
+            If epoch_count < 1
             If batch_size < 1
         """
         import torch
@@ -218,7 +218,7 @@ def fit(
         if not self._input_conversion._is_fit_data_valid(train_data):
             raise FeatureDataMismatchError
 
-        _check_bounds("epoch_size", epoch_size, lower_bound=_ClosedBound(1))
+        _check_bounds("epoch_count", epoch_count, lower_bound=_ClosedBound(1))
         _check_bounds("batch_size", batch_size, lower_bound=_ClosedBound(1))
 
         copied_model = copy.deepcopy(self)
@@ -236,7 +236,7 @@ def fit(
         loss_fn = nn.MSELoss()
 
         optimizer = torch.optim.SGD(copied_model._model.parameters(), lr=learning_rate)
-        for _ in range(epoch_size):
+        for _ in range(epoch_count):
             loss_sum = 0.0
             amount_of_loss_values_calculated = 0
             for x, y in iter(dataloader):
@@ -273,7 +273,7 @@ def fit(
    #         "median_absolute_deviation",
    #         "coefficient_of_determination",
    #     ],
-   #     epoch_size: int = 25,
+   #     epoch_count: int = 25,
    #     batch_size: int = 1,
    #     learning_rate: float = 0.001,
    # ) -> Self:
@@ -288,7 +288,7 @@ def fit(
    #         The data the network should be trained on.
    #     optimization_metric:
    #         The metric that should be used for determining the performance of a model.
-   #     epoch_size:
+   #     epoch_count:
    #         The number of times the training cycle should be done.
    #     batch_size:
    #         The size of data batches that should be loaded at one time.
@@ -317,7 +317,7 @@ def fit(
    #             "Hyperparameter optimization is currently not supported for CNN Regression Tasks.",
    #         ) # pragma: no cover
    #
-   #     _check_bounds("epoch_size", epoch_size, lower_bound=_ClosedBound(1))
+   #     _check_bounds("epoch_count", epoch_count, lower_bound=_ClosedBound(1))
    #     _check_bounds("batch_size", batch_size, lower_bound=_ClosedBound(1))
    #
    #     list_of_models = self._get_models_for_all_choices()
@@ -334,7 +334,7 @@ def fit(
    #             executor.submit(
    #                 model.fit,
    #                 train_set, # type: ignore[arg-type]
-   #                 epoch_size,
+   #                 epoch_count,
    #                 batch_size,
    #                 learning_rate,
    #             ),
@@ -774,7 +774,7 @@ def from_pretrained_model(huggingface_repo: str) -> NeuralNetworkClassifier: #
     def fit(
         self,
         train_data: IFT,
-        epoch_size: int = 25,
+        epoch_count: int = 25,
         batch_size: int = 1,
         learning_rate: float = 0.001,
         callback_on_batch_completion: Callable[[int, float], None] | None = None,
@@ -789,7 +789,7 @@ def fit(
         ----------
         train_data:
             The data the network should be trained on.
-        epoch_size:
+        epoch_count:
             The number of times the training cycle should be done.
         batch_size:
             The size of data batches that should be loaded at one time.
@@ -810,7 +810,7 @@ def fit(
         Raises
         ------
         ValueError
-            If epoch_size < 1
+            If epoch_count < 1
             If batch_size < 1
         """
         import torch
@@ -831,7 +831,7 @@ def fit(
         if not self._input_conversion._is_fit_data_valid(train_data):
             raise FeatureDataMismatchError
 
-        _check_bounds("epoch_size", epoch_size, lower_bound=_ClosedBound(1))
+        _check_bounds("epoch_count", epoch_count, lower_bound=_ClosedBound(1))
         _check_bounds("batch_size", batch_size, lower_bound=_ClosedBound(1))
 
         copied_model = copy.deepcopy(self)
@@ -856,7 +856,7 @@ def fit(
         loss_fn = nn.BCELoss()
 
         optimizer = torch.optim.SGD(copied_model._model.parameters(), lr=learning_rate)
-        for _ in range(epoch_size):
+        for _ in range(epoch_count):
             loss_sum = 0.0
             amount_of_loss_values_calculated = 0
             for x, y in iter(dataloader):
@@ -890,7 +890,7 @@ def fit(
    #     train_data: IFT,
    #     optimization_metric: Literal["accuracy", "precision", "recall", "f1_score"],
    #     positive_class: Any = None,
-   #     epoch_size: int = 25,
+   #     epoch_count: int = 25,
    #     batch_size: int = 1,
    #     learning_rate: float = 0.001,
    # ) -> Self:
@@ -907,7 +907,7 @@ def fit(
    #         The metric that should be used for determining the performance of a model.
    #     positive_class:
    #         The class to be considered positive. Only needs to be provided when choosing precision, recall or f1_score as the optimization metric.
-   #     epoch_size:
+   #     epoch_count:
    #         The number of times the training cycle should be done.
    #     batch_size:
    #         The size of data batches that should be loaded at one time.
@@ -936,7 +936,7 @@ def fit(
    #             "Continuous Predictions are currently not supported for Time Series Classification.",
    #         )
    #
-   #     _check_bounds("epoch_size", epoch_size, lower_bound=_ClosedBound(1))
+   #     _check_bounds("epoch_count", epoch_count, lower_bound=_ClosedBound(1))
    #     _check_bounds("batch_size", batch_size, lower_bound=_ClosedBound(1))
    #
    #     list_of_models = self._get_models_for_all_choices()
@@ -956,7 +956,7 @@ def fit(
    #             executor.submit(
    #                 model.fit,
    #                 train_set, # type: ignore[arg-type]
-   #                 epoch_size,
+   #                 epoch_count,
    #                 batch_size,
    #                 learning_rate,
    #             ),
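
The validation on the renamed parameter is unchanged: `_check_bounds("epoch_count", epoch_count, lower_bound=_ClosedBound(1))` still rejects values below 1. A minimal sketch of the caller-facing behaviour, assuming a tabular setup like the tests below and import paths for `InputConversionTable` and `OutOfBoundsError` that are not shown in this diff:

    import pytest

    from safeds.data.tabular.containers import Table
    from safeds.exceptions import OutOfBoundsError  # assumed import path
    from safeds.ml.nn import NeuralNetworkRegressor
    from safeds.ml.nn.converters import InputConversionTable  # assumed module path
    from safeds.ml.nn.layers import ForwardLayer

    train_table = Table({"value": [1.0, 2.0, 3.0, 4.0], "target": [2.0, 4.0, 6.0, 8.0]})
    model = NeuralNetworkRegressor(
        InputConversionTable(),  # assumed converter; this diff only shows the image converters
        [ForwardLayer(neuron_count=1)],
    )

    # epoch_count < 1 trips the bounds check and is rejected
    with pytest.raises(OutOfBoundsError):
        model.fit(train_table.to_tabular_dataset("target"), epoch_count=0)

    # epoch_count >= 1 trains exactly as before; only the keyword changed
    fitted = model.fit(train_table.to_tabular_dataset("target"), epoch_count=1, learning_rate=0.01)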

tests/safeds/ml/nn/test_cnn_workflow.py

Lines changed: 6 additions & 6 deletions
@@ -3,6 +3,8 @@
 
 import pytest
 import torch
+from torch.types import Device
+
 from safeds._config import _get_device
 from safeds.data.image.containers import ImageList
 from safeds.data.image.containers._single_size_image_list import _SingleSizeImageList
@@ -27,8 +29,6 @@
     MaxPooling2DLayer,
 )
 from safeds.ml.nn.typing import VariableImageSize
-from torch.types import Device
-
 from tests.helpers import configure_test_with_device, device_cpu, device_cuda, images_all, resolve_resource_path
 
 if TYPE_CHECKING:
@@ -88,7 +88,7 @@ def test_should_train_and_predict_model(
         InputConversionImageToTable(image_dataset.input_size),
         layers,
     )
-    nn = nn_original.fit(image_dataset, epoch_size=2)
+    nn = nn_original.fit(image_dataset, epoch_count=2)
     assert nn_original._model is not nn._model
     prediction: ImageDataset = nn.predict(image_dataset.get_input())
     assert one_hot_encoder.inverse_transform(prediction.get_output()) == Table({"class": prediction_label})
@@ -147,7 +147,7 @@ def test_should_train_and_predict_model(
         InputConversionImageToColumn(image_dataset.input_size),
         layers,
     )
-    nn = nn_original.fit(image_dataset, epoch_size=2)
+    nn = nn_original.fit(image_dataset, epoch_count=2)
     assert nn_original._model is not nn._model
     prediction: ImageDataset = nn.predict(image_dataset.get_input())
     assert prediction.get_output() == Column("class", prediction_label)
@@ -188,7 +188,7 @@ def test_should_train_and_predict_model(
         InputConversionImageToImage(image_dataset.input_size),
         layers,
     )
-    nn = nn_original.fit(image_dataset, epoch_size=20)
+    nn = nn_original.fit(image_dataset, epoch_count=20)
     assert nn_original._model is not nn._model
     prediction = nn.predict(image_dataset.get_input())
     assert isinstance(prediction.get_output(), ImageList)
@@ -229,7 +229,7 @@ def test_should_train_and_predict_model_variable_image_size(
         InputConversionImageToImage(VariableImageSize.from_image_size(image_dataset.input_size)),
         layers,
     )
-    nn = nn_original.fit(image_dataset, epoch_size=20)
+    nn = nn_original.fit(image_dataset, epoch_count=20)
     assert nn_original._model is not nn._model
     prediction = nn.predict(
         image_dataset.get_input().resize(

tests/safeds/ml/nn/test_dropout_workflow.py

Lines changed: 3 additions & 3 deletions
@@ -1,4 +1,6 @@
 import pytest
+from torch.types import Device
+
 from safeds._config import _get_device
 from safeds.data.tabular.containers import Table
 from safeds.ml.nn import (
@@ -11,8 +13,6 @@
     DropoutLayer,
     ForwardLayer,
 )
-from torch.types import Device
-
 from tests.helpers import configure_test_with_device, get_devices, get_devices_ids
 
 
@@ -32,6 +32,6 @@ def test_forward_model(device: Device) -> None:
         [ForwardLayer(neuron_count=1), DropoutLayer(probability=0.5)],
     )
 
-    fitted_model = model.fit(train_table.to_tabular_dataset("value"), epoch_size=1, learning_rate=0.01)
+    fitted_model = model.fit(train_table.to_tabular_dataset("value"), epoch_count=1, learning_rate=0.01)
     assert fitted_model._model is not None
     assert fitted_model._model.state_dict()["_pytorch_layers.0._layer.weight"].device == _get_device()

tests/safeds/ml/nn/test_forward_workflow.py

Lines changed: 3 additions & 3 deletions
@@ -1,4 +1,6 @@
 import pytest
+from torch.types import Device
+
 from safeds._config import _get_device
 from safeds.data.tabular.containers import Table
 from safeds.data.tabular.transformation import StandardScaler
@@ -11,8 +13,6 @@
 from safeds.ml.nn.layers import (
     ForwardLayer,
 )
-from torch.types import Device
-
 from tests.helpers import configure_test_with_device, get_devices, get_devices_ids, resolve_resource_path
 
 
@@ -38,7 +38,7 @@ def test_forward_model(device: Device) -> None:
         [ForwardLayer(neuron_count=1)],
     )
 
-    fitted_model = model.fit(train_table.to_tabular_dataset("target"), epoch_size=1, learning_rate=0.01)
+    fitted_model = model.fit(train_table.to_tabular_dataset("target"), epoch_count=1, learning_rate=0.01)
     fitted_model.predict(test_table.remove_columns_except(["value"]))
     assert fitted_model._model is not None
     assert fitted_model._model.state_dict()["_pytorch_layers.0._layer.weight"].device == _get_device()

tests/safeds/ml/nn/test_lstm_workflow.py

Lines changed: 4 additions & 4 deletions
@@ -1,4 +1,6 @@
 import pytest
+from torch.types import Device
+
 from safeds._config import _get_device
 from safeds.data.tabular.containers import Table
 from safeds.data.tabular.transformation import RangeScaler
@@ -13,8 +15,6 @@
     GRULayer,
     LSTMLayer,
 )
-from torch.types import Device
-
 from tests.helpers import configure_test_with_device, get_devices, get_devices_ids, resolve_resource_path
 
 
@@ -45,7 +45,7 @@ def test_lstm_model(device: Device) -> None:
             continuous=True,
             extra_names=["date"],
         ),
-        epoch_size=1,
+        epoch_count=1,
     )
 
     trained_model.predict(test_table)
@@ -57,7 +57,7 @@ def test_lstm_model(device: Device) -> None:
             continuous=False,
             extra_names=["date"],
         ),
-        epoch_size=1,
+        epoch_count=1,
     )
 
     trained_model_2.predict(test_table)
