# augmentation.py (forked from cm-amaya/UNet_Multiclass)
import albumentations as A
import cv2


def round_clip_0_1(x, **kwargs):
    # Masks get interpolated during geometric transforms; keep them binary.
    return x.round().clip(0, 1)


# Heavy augmentations for training.
# Note: the IAA* transforms and RandomBrightness/RandomContrast below come from
# older albumentations releases (< 1.0); newer releases replace them with
# A.GaussNoise, A.Perspective, A.Sharpen and A.RandomBrightnessContrast.
def get_training_augmentation(imgsize=320):
    train_transform = [
        A.HorizontalFlip(p=0.5),
        A.ShiftScaleRotate(scale_limit=0.5, rotate_limit=0, shift_limit=0.1, p=1, border_mode=0),
        A.PadIfNeeded(min_height=imgsize, min_width=imgsize, always_apply=True, border_mode=0),
        A.RandomCrop(height=imgsize, width=imgsize, always_apply=True),
        A.IAAAdditiveGaussianNoise(p=0.2),
        A.IAAPerspective(p=0.5),
        A.OneOf(
            [
                A.CLAHE(p=1),
                A.RandomBrightness(p=1),
                A.RandomGamma(p=1),
            ],
            p=0.9,
        ),
        A.OneOf(
            [
                A.IAASharpen(p=1),
                A.Blur(blur_limit=3, p=1),
                A.MotionBlur(blur_limit=3, p=1),
            ],
            p=0.9,
        ),
        A.OneOf(
            [
                A.RandomContrast(p=1),
                A.HueSaturationValue(p=1),
            ],
            p=0.9,
        ),
        A.Lambda(mask=round_clip_0_1),
    ]
    return A.Compose(train_transform)
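

# Illustrative sketch (not part of the original module): how the training
# augmentation is typically applied to an image/mask pair. The function name,
# array shapes and random data are assumptions for demonstration; it targets the
# older albumentations release that still ships the IAA* transforms above.
def _example_training_augmentation():
    import numpy as np

    image = np.random.randint(0, 256, (400, 400, 3), dtype=np.uint8)
    mask = np.random.randint(0, 2, (400, 400), dtype=np.uint8)
    aug = get_training_augmentation(imgsize=320)
    sample = aug(image=image, mask=mask)  # dict with keys "image" and "mask"
    return sample["image"], sample["mask"]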


def get_validation_augmentation(imgsize=800):
    """Add padding to make the image shape divisible by 32."""
    test_transform = [
        A.PadIfNeeded(imgsize, imgsize),
    ]
    return A.Compose(test_transform)
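

# Note (assumption about the downstream model, not stated in this file): a
# UNet-style encoder with five 2x downsamplings needs input sides divisible by
# 2**5 = 32, which is why the default pad target is 800 (800 = 25 * 32).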


def get_preprocessing(preprocessing_fn):
    """Construct preprocessing transform.

    Args:
        preprocessing_fn (callable): data normalization function
            (can be specific for each pretrained neural network)
    Return:
        transform: albumentations.Compose
    """
    _transform = [
        A.Lambda(image=preprocessing_fn),
    ]
    return A.Compose(_transform)
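

# Illustrative sketch (an assumption, not confirmed by this file): one common
# source for `preprocessing_fn` is segmentation_models_pytorch, whose encoders
# expose the normalization used when they were pretrained.
def _example_preprocessing():
    import numpy as np
    from segmentation_models_pytorch.encoders import get_preprocessing_fn

    preprocessing_fn = get_preprocessing_fn("resnet34", pretrained="imagenet")
    preprocess = get_preprocessing(preprocessing_fn)
    image = np.random.randint(0, 256, (320, 320, 3), dtype=np.uint8)
    return preprocess(image=image)["image"]  # float array, ImageNet-normalized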


class GrayToRGB(A.ImageOnlyTransform):
    """Convert a single-channel grayscale image to 3-channel RGB.

    Targets:
        image
    Image types:
        uint8, float32
    """

    def __init__(self, p=1.0):
        # Pass p by keyword: the first positional argument is always_apply.
        super(GrayToRGB, self).__init__(p=p)

    def apply(self, img, **params):
        return cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)


class RGBToGray(A.ImageOnlyTransform):
    """Convert a 3-channel RGB image to single-channel grayscale.

    Targets:
        image
    Image types:
        uint8, float32
    """

    def __init__(self, p=1.0):
        # Pass p by keyword: the first positional argument is always_apply.
        super(RGBToGray, self).__init__(p=p)

    def apply(self, img, **params):
        return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
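

# Illustrative sketch (an assumption, not from the original repository): GrayToRGB
# lets a single-channel image feed an encoder pretrained on RGB data; RGBToGray
# goes the other way. The function name and random input are illustrative only.
def _example_gray_to_rgb():
    import numpy as np

    gray = np.random.randint(0, 256, (320, 320), dtype=np.uint8)
    pipeline = A.Compose([GrayToRGB(p=1.0), A.HorizontalFlip(p=0.5)])
    rgb = pipeline(image=gray)["image"]  # shape becomes (320, 320, 3)
    return rgb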