Commit aec1388

build(deps): upgrade torch and lightning (#1)

DSergiu authored Nov 29, 2022
1 parent 052a70d commit aec1388

Showing 21 changed files with 897 additions and 773 deletions.
37 changes: 37 additions & 0 deletions .github/workflows/ci.yaml
@@ -0,0 +1,37 @@
+name: 'CI'
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+on: push
+
+jobs:
+  test:
+    name: 'Test'
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+
+      - id: setup-python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.8'
+
+      - id: python-cache
+        uses: actions/cache@v3
+        with:
+          path: |
+            venv
+            datasets
+          key: pip-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('requirements.txt') }}
+
+      - name: 'Install dependencies'
+        if: steps.python-cache.outputs.cache-hit != 'true'
+        run: |
+          if [ -d "venv" ]; then rm -rf venv; fi
+          python3 -m venv venv
+          venv/bin/python3 -m pip install -r requirements.txt
+
+      - name: 'Test'
+        run: venv/bin/python3 -m pytest
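
Editor's note: the job only reinstalls dependencies on a cache miss; the venv and datasets directories are cached under a key built from the Python version and the hash of requirements.txt. A rough local equivalent of the install-and-test steps, as a sketch (the script name and the directory-based cache check are mine, not part of the commit):

# local_ci.py - approximate local equivalent of the CI job above (hypothetical).
# Assumes a POSIX layout and a requirements.txt at the repository root.
import os
import subprocess
import sys

def run(cmd):
    print('+', ' '.join(cmd))
    subprocess.run(cmd, check=True)

if not os.path.isdir('venv'):  # crude stand-in for the Actions cache check
    run([sys.executable, '-m', 'venv', 'venv'])
    run(['venv/bin/python3', '-m', 'pip', 'install', '-r', 'requirements.txt'])
run(['venv/bin/python3', '-m', 'pytest'])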
1 change: 1 addition & 0 deletions .gitignore
@@ -5,3 +5,4 @@ datasets
 *.pth
 */lightning_logs
 */.ipynb_checkpoints
+__pycache__
45 changes: 30 additions & 15 deletions 01-Linear-Regression/LinearRegression.py
@@ -1,21 +1,20 @@
+from argparse import ArgumentParser
+import sys
+
+sys.path.append('../src')
+
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from argparse import ArgumentParser
+from BaseLightningModule import BaseLightningModule
 from pytorch_lightning import Trainer
-from pytorch_lightning.core import LightningModule
 from torch import optim
 from torch.utils.data import DataLoader


-class LinearRegression(LightningModule):
-    def __init__(self,
-                 learning_rate: float = 0.05,
-                 **kwargs
-                 ):
+class LinearRegression(BaseLightningModule):
+    def __init__(self, **kwargs):
         super().__init__()
-        self.save_hyperparameters()
         self.lin = nn.Linear(1, 1)
         self.example_input_array = torch.rand(1, 1)

@@ -31,19 +30,34 @@ def training_step(self, batch, batch_idx):

     def configure_optimizers(self):
         optimizer = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
-        return [optimizer]
+        return optimizer

+    def prepare_data(self):
+        return None
+
     def setup(self, stage):
         x_train = torch.tensor([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168],
-                           [9.779], [6.182], [7.59], [2.167], [7.042],
-                           [10.791], [5.313], [7.997], [3.1]], dtype=torch.float32)
+                                [9.779], [6.182], [7.59], [2.167], [7.042],
+                                [10.791], [5.313], [7.997], [3.1]], dtype=torch.float32)
         y_train = torch.tensor([[1.7], [2.76], [2.09], [3.19], [1.694], [1.573],
-                           [3.366], [2.596], [2.53], [1.221], [2.827],
-                           [3.465], [1.65], [2.904], [1.3]], dtype=torch.float32)
+                                [3.366], [2.596], [2.53], [1.221], [2.827],
+                                [3.465], [1.65], [2.904], [1.3]], dtype=torch.float32)
         self.data_train = [[x_train, y_train]]

     def train_dataloader(self):
-        return DataLoader(self.data_train, num_workers=4)
+        return DataLoader(self.data_train)

+    def val_dataloader(self):
+        return None
+
+    def test_dataloader(self):
+        return None
+
+    @staticmethod
+    def add_model_specific_args():
+        parser = ArgumentParser()
+        parser.add_argument('--learning_rate', default=0.05, type=float, help='the learning rate')
+        return parser


 def main(args):
@@ -53,7 +67,8 @@ def main(args):


 if __name__ == '__main__':
-    parser = ArgumentParser()
+    parser = LinearRegression.add_model_specific_args()
     parser = Trainer.add_argparse_args(parser)
+    parser.set_defaults(accelerator='gpu', devices=1, check_val_every_n_epoch=5)
     args = parser.parse_args()
     main(args)
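
Editor's note: a minimal sketch of exercising the migrated module from Python rather than through the CLI above. It assumes BaseLightningModule (not rendered on this page) calls self.save_hyperparameters(), so that self.hparams.learning_rate resolves in configure_optimizers; the CPU settings are illustrative and the script is hypothetical, to be run from the 01-Linear-Regression directory:

# sketch_fit_linear.py - hypothetical usage, not part of the commit.
from pytorch_lightning import Trainer

from LinearRegression import LinearRegression

model = LinearRegression(learning_rate=0.05)
trainer = Trainer(accelerator='cpu', max_epochs=100, log_every_n_steps=1)
trainer.fit(model)

# Inspect the fitted parameters of the underlying nn.Linear(1, 1).
print(model.lin.weight.item(), model.lin.bias.item())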
18 changes: 8 additions & 10 deletions 01-Linear-Regression/Notebook.ipynb

Large diffs are not rendered by default.

54 changes: 10 additions & 44 deletions 02-Logistic-Regression/LogisticRegression.py
@@ -1,24 +1,18 @@
-from argparse import ArgumentParser
+import sys
+
+sys.path.append('../src')
+
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
+from BaseLightningModule import BaseLightningModule
 from pytorch_lightning import Trainer
-from pytorch_lightning.core import LightningModule
 from torch import optim
 from torch.utils.data import DataLoader
 from torchvision import datasets, transforms


-class LogisticRegression(LightningModule):
-    def __init__(self,
-                 data_root: str = '../datasets',
-                 batch_size: int = 64,
-                 learning_rate: float = 0.01,
-                 **kwargs
-                 ):
+class LogisticRegression(BaseLightningModule):
+    def __init__(self, **kwargs):
         super().__init__()
-        self.save_hyperparameters()
         self.seq = nn.Sequential(
             nn.Flatten(),
             nn.Linear(28 * 28, 10)
@@ -28,15 +22,6 @@ def __init__(self,
     def forward(self, x):
         return self.seq(x)

-    def training_step(self, batch, batch_idx):
-        return self._share_step(batch, 'train', log_acc=False)
-
-    def validation_step(self, batch, batch_idx):
-        return self._share_step(batch, 'val')
-
-    def test_step(self, batch, batch_idx):
-        return self._share_step(batch, 'test')
-
     def _share_step(self, batch, prefix, log_acc=True):
         x, y = batch
         y_hat = self(x)
@@ -52,38 +37,19 @@ def _share_step(self, batch, prefix, log_acc=True):

     def configure_optimizers(self):
         optimizer = optim.SGD(self.parameters(), lr=self.hparams.learning_rate)
-        return [optimizer]
-
-    def prepare_data(self):
-        datasets.MNIST(self.hparams.data_root, train=True, download=True)
-        datasets.MNIST(self.hparams.data_root, train=False, download=True)
-
-    def setup(self, stage):
-        transform = transforms.Compose([transforms.ToTensor()])
-        train = datasets.MNIST(self.hparams.data_root, train=True, download=False, transform=transform)
-        self.mnist_train, self.mnist_val = torch.utils.data.random_split(train, [50000, 10000])
-        self.mnist_test = datasets.MNIST(self.hparams.data_root, train=False, download=False, transform=transform)
-
-    def train_dataloader(self):
-        return DataLoader(self.mnist_train, batch_size=self.hparams.batch_size, num_workers=4, shuffle=True)
-
-    def val_dataloader(self):
-        return DataLoader(self.mnist_val, batch_size=self.hparams.batch_size, num_workers=4)
-
-    def test_dataloader(self):
-        return DataLoader(self.mnist_test, batch_size=self.hparams.batch_size, num_workers=4)
+        return optimizer


 def main(args):
     model = LogisticRegression(**vars(args))
     trainer = Trainer.from_argparse_args(args)
     trainer.fit(model)
+    trainer.test(model)

-    # Manually call test which runs the test loop and logs accuracy and loss
-    trainer.test()

 if __name__ == '__main__':
-    parser = ArgumentParser()
+    parser = LogisticRegression.add_model_specific_args()
     parser = Trainer.add_argparse_args(parser)
+    parser.set_defaults(accelerator='gpu', devices=1, check_val_every_n_epoch=5)
     args = parser.parse_args()
     main(args)
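
Editor's note: the new shared base class lives in src/BaseLightningModule.py, which is among the 21 changed files but is not rendered on this page. From what the subclasses stopped defining (the save_hyperparameters() call, the step methods, the MNIST data plumbing, and add_model_specific_args), it can plausibly be reconstructed as the sketch below; treat every detail as an inference, not the file's actual contents:

# src/BaseLightningModule.py - inferred reconstruction (hypothetical; the real
# file is in this commit but its diff is not rendered on this page).
from argparse import ArgumentParser

import torch
from pytorch_lightning.core import LightningModule
from torch.utils.data import DataLoader
from torchvision import datasets, transforms


class BaseLightningModule(LightningModule):
    def __init__(self, data_root='../datasets', batch_size=64, learning_rate=0.01, **kwargs):
        super().__init__()
        self.save_hyperparameters()  # exposes self.hparams.* to subclasses

    # The step methods delegate to _share_step, which each subclass defines.
    def training_step(self, batch, batch_idx):
        return self._share_step(batch, 'train', log_acc=False)

    def validation_step(self, batch, batch_idx):
        return self._share_step(batch, 'val')

    def test_step(self, batch, batch_idx):
        return self._share_step(batch, 'test')

    # Default MNIST data plumbing; LinearRegression overrides all of this.
    def prepare_data(self):
        datasets.MNIST(self.hparams.data_root, train=True, download=True)
        datasets.MNIST(self.hparams.data_root, train=False, download=True)

    def setup(self, stage):
        transform = transforms.Compose([transforms.ToTensor()])
        train = datasets.MNIST(self.hparams.data_root, train=True, download=False, transform=transform)
        self.mnist_train, self.mnist_val = torch.utils.data.random_split(train, [50000, 10000])
        self.mnist_test = datasets.MNIST(self.hparams.data_root, train=False, download=False, transform=transform)

    def train_dataloader(self):
        return DataLoader(self.mnist_train, batch_size=self.hparams.batch_size, num_workers=4, shuffle=True)

    def val_dataloader(self):
        return DataLoader(self.mnist_val, batch_size=self.hparams.batch_size, num_workers=4)

    def test_dataloader(self):
        return DataLoader(self.mnist_test, batch_size=self.hparams.batch_size, num_workers=4)

    @staticmethod
    def add_model_specific_args():
        parser = ArgumentParser()
        parser.add_argument('--learning_rate', default=0.01, type=float, help='the learning rate')
        parser.add_argument('--batch_size', default=64, type=int, help='the batch size')
        parser.add_argument('--data_root', default='../datasets', type=str, help='path to the datasets')
        return parser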
167 changes: 104 additions & 63 deletions 02-Logistic-Regression/Notebook.ipynb

Large diffs are not rendered by default.

54 changes: 10 additions & 44 deletions 03-Neural-Network/NeuralNetwork.py
@@ -1,24 +1,18 @@
-from argparse import ArgumentParser
+import sys
+
+sys.path.append('../src')
+
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
+from BaseLightningModule import BaseLightningModule
 from pytorch_lightning import Trainer
-from pytorch_lightning.core import LightningModule
 from torch import optim
 from torch.utils.data import DataLoader
 from torchvision import datasets, transforms


-class NeuralNetwork(LightningModule):
-    def __init__(self,
-                 data_root: str = '../datasets',
-                 batch_size: int = 64,
-                 learning_rate: float = 0.01,
-                 **kwargs
-                 ):
+class NeuralNetwork(BaseLightningModule):
+    def __init__(self, **kwargs):
         super().__init__()
-        self.save_hyperparameters()
         self.seq = nn.Sequential(
             nn.Flatten(),
             nn.Linear(28 * 28, 300),
@@ -33,15 +27,6 @@ def __init__(self,
     def forward(self, x):
         return self.seq(x)

-    def training_step(self, batch, batch_idx):
-        return self._share_step(batch, 'train', log_acc=False)
-
-    def validation_step(self, batch, batch_idx):
-        return self._share_step(batch, 'val')
-
-    def test_step(self, batch, batch_idx):
-        return self._share_step(batch, 'test')
-
     def _share_step(self, batch, prefix, log_acc=True):
         x, y = batch
         y_hat = self(x)
@@ -57,38 +42,19 @@ def _share_step(self, batch, prefix, log_acc=True):

     def configure_optimizers(self):
         optimizer = optim.SGD(self.parameters(), lr=self.hparams.learning_rate)
-        return [optimizer]
-
-    def prepare_data(self):
-        datasets.MNIST(self.hparams.data_root, train=True, download=True)
-        datasets.MNIST(self.hparams.data_root, train=False, download=True)
-
-    def setup(self, stage):
-        transform = transforms.Compose([transforms.ToTensor()])
-        train = datasets.MNIST(self.hparams.data_root, train=True, download=False, transform=transform)
-        self.mnist_train, self.mnist_val = torch.utils.data.random_split(train, [50000, 10000])
-        self.mnist_test = datasets.MNIST(self.hparams.data_root, train=False, download=False, transform=transform)
-
-    def train_dataloader(self):
-        return DataLoader(self.mnist_train, batch_size=self.hparams.batch_size, num_workers=4, shuffle=True)
-
-    def val_dataloader(self):
-        return DataLoader(self.mnist_val, batch_size=self.hparams.batch_size, num_workers=4)
-
-    def test_dataloader(self):
-        return DataLoader(self.mnist_test, batch_size=self.hparams.batch_size, num_workers=4)
+        return optimizer


 def main(args):
     model = NeuralNetwork(**vars(args))
     trainer = Trainer.from_argparse_args(args)
     trainer.fit(model)
+    trainer.test(model)

-    # Manually call test which runs the test loop and logs accuracy and loss
-    trainer.test()

 if __name__ == '__main__':
-    parser = ArgumentParser()
+    parser = NeuralNetwork.add_model_specific_args()
     parser = Trainer.add_argparse_args(parser)
+    parser.set_defaults(accelerator='gpu', devices=1, check_val_every_n_epoch=5)
     args = parser.parse_args()
     main(args)
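
Editor's note on the trainer.test() → trainer.test(model) change repeated in each script (my gloss, not stated in the commit): in the upgraded Lightning API, test() with no arguments tries to resolve a saved checkpoint, while passing the freshly fitted model evaluates the in-memory weights directly. A sketch:

# sketch_test.py - hypothetical illustration of the two test() idioms, assuming
# pytorch_lightning 1.x semantics; run from the 03-Neural-Network directory.
from pytorch_lightning import Trainer

from NeuralNetwork import NeuralNetwork

model = NeuralNetwork(learning_rate=0.01)
trainer = Trainer(accelerator='cpu', max_epochs=1)
trainer.fit(model)

trainer.test(model)               # evaluates the just-fitted, in-memory weights
# trainer.test(ckpt_path='best')  # alternative: reload the best checkpoint first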
(Diffs for the remaining changed files are not rendered on this page.)
