from torch import randn as torch_randn
from fastai.vision.all import test_eq

Networks
get_network_class
get_network_class (model_name:str)
regist_network
regist_network (model_class)
# Shape sanity checks on a 3D volume: batch=16, 1 channel, spatial 32x64x64.
x = torch_randn(16, 1, 32, 64, 64)
xdim = len(x.shape)-2  # number of spatial dims (3 here); drops batch and channel dims
tst = ConvLayer(1, 1, ndim=xdim)  # default ConvLayer preserves spatial size
test_eq(tst(x).shape, [16, 1, 32, 64, 64])
tst = MaxPool(2, ndim=xdim)  # pooling with size 2 halves every spatial dim
test_eq(tst(x).shape, [16, 1, 16, 32, 32])
tst = Lambda(lambda x: x+np.float32(1e-3))  # elementwise op wrapped as a layer keeps shape
test_eq(tst(x).shape, [16, 1, 32, 64, 64])
test_eq(torch_cat((x, tst(x)), 1).shape, [16, 2, 32, 64, 64])

DnCNN
DnCNN
DnCNN (channels, num_of_layers=18, features=64)
Base class for all neural network modules.
Your models should also subclass this class.
Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 20, 5)
self.conv2 = nn.Conv2d(20, 20, 5)
def forward(self, x):
x = F.relu(self.conv1(x))
return F.relu(self.conv2(x))
Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.
.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.
:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool
# DnCNN smoke test on a 2D input: the network preserves input shape end to end.
x = torch_randn(16, 1, 32, 64)
tst = DnCNN(1)  # 1 input channel
test_eq(tst(x).shape, [16, 1, 32, 64])
print(tst)

DnCNN(
(dncnn): Sequential(
(0): Conv2d(1, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(3): BatchNorm2d(64, eps=0.0001, momentum=0.9, affine=True, track_running_stats=True)
(4): ReLU(inplace=True)
(5): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(6): BatchNorm2d(64, eps=0.0001, momentum=0.9, affine=True, track_running_stats=True)
(7): ReLU(inplace=True)
(8): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(9): BatchNorm2d(64, eps=0.0001, momentum=0.9, affine=True, track_running_stats=True)
(10): ReLU(inplace=True)
(11): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(12): BatchNorm2d(64, eps=0.0001, momentum=0.9, affine=True, track_running_stats=True)
(13): ReLU(inplace=True)
(14): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(15): BatchNorm2d(64, eps=0.0001, momentum=0.9, affine=True, track_running_stats=True)
(16): ReLU(inplace=True)
(17): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(18): BatchNorm2d(64, eps=0.0001, momentum=0.9, affine=True, track_running_stats=True)
(19): ReLU(inplace=True)
(20): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(21): BatchNorm2d(64, eps=0.0001, momentum=0.9, affine=True, track_running_stats=True)
(22): ReLU(inplace=True)
(23): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(24): BatchNorm2d(64, eps=0.0001, momentum=0.9, affine=True, track_running_stats=True)
(25): ReLU(inplace=True)
(26): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(27): BatchNorm2d(64, eps=0.0001, momentum=0.9, affine=True, track_running_stats=True)
(28): ReLU(inplace=True)
(29): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(30): BatchNorm2d(64, eps=0.0001, momentum=0.9, affine=True, track_running_stats=True)
(31): ReLU(inplace=True)
(32): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(33): BatchNorm2d(64, eps=0.0001, momentum=0.9, affine=True, track_running_stats=True)
(34): ReLU(inplace=True)
(35): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(36): BatchNorm2d(64, eps=0.0001, momentum=0.9, affine=True, track_running_stats=True)
(37): ReLU(inplace=True)
(38): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(39): BatchNorm2d(64, eps=0.0001, momentum=0.9, affine=True, track_running_stats=True)
(40): ReLU(inplace=True)
(41): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(42): BatchNorm2d(64, eps=0.0001, momentum=0.9, affine=True, track_running_stats=True)
(43): ReLU(inplace=True)
(44): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(45): BatchNorm2d(64, eps=0.0001, momentum=0.9, affine=True, track_running_stats=True)
(46): ReLU(inplace=True)
(47): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(48): BatchNorm2d(64, eps=0.0001, momentum=0.9, affine=True, track_running_stats=True)
(49): ReLU(inplace=True)
(50): Conv2d(64, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
)
My UNet
SubNetConv
SubNetConv (ks=3, stride=1, padding=None, bias=None, ndim=2, norm_type=<NormType.Batch: 1>, bn_1st=True, act_cls=<class 'torch.nn.modules.activation.ReLU'>, transpose=False, init='auto', xtra=None, bias_std=0.01, dropout=0.0)
# SubNetConv tests on a 3D volume.
x = torch_randn(16, 1, 32, 64, 64)
xdim = len(x.shape)-2  # number of spatial dims
# reduce
# Two stride-2 conv layers: each halves every spatial dim, so 32x64x64 -> 8x16x16.
tst = SubNetConv(3, padding=1, stride=2, ndim=xdim,
norm_type=NormType.Batch, dropout=.1)(1, 2, 2)
y = tst(x)
test_eq(y.shape, [16, 2, 8, 16, 16])
print(tst)
# upsample
# Transposed conv with ks=stride=4 multiplies each spatial dim by 4, restoring the input size.
tst = SubNetConv(ks=4, padding=0, stride=4, ndim=xdim, norm_type=NormType.Batch,
transpose=True)(2, 1) # to double the size, the kernel cannot be odd
test_eq(tst(y).shape, [16, 1, 32, 64, 64])
print(tst)
del y  # release the intermediate activation
# ConvLayer(2*n_out_channels, n_out_channels, ks=ks, transpose=True, padding=(ks-1)//2)

Sequential(
(0): Sequential(
(0): ConvLayer(
(0): Conv3d(1, 2, kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=(1, 1, 1), bias=False)
(1): BatchNorm3d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU()
)
(1): Dropout(p=0.1, inplace=False)
)
(1): Sequential(
(0): ConvLayer(
(0): Conv3d(2, 2, kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=(1, 1, 1), bias=False)
(1): BatchNorm3d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU()
)
(1): Dropout(p=0.1, inplace=False)
)
)
ConvLayer(
(0): ConvTranspose3d(2, 1, kernel_size=(4, 4, 4), stride=(4, 4, 4), bias=False)
(1): BatchNorm3d(1, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU()
)
MyUNet
MyUNet (depth=4, mult_chan=32, in_channels=1, out_channels=1, last_activation=None, kernel_size=3, ndim=2, n_conv_per_depth=2, activation='ReLU', norm_type=<NormType.Batch: 1>, dropout=0.0, pool=<function MaxPool>, pool_size=2, residual=False, prob_out=False, eps_scale=0.001)
Base class for all neural network modules.
Your models should also subclass this class.
Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 20, 5)
self.conv2 = nn.Conv2d(20, 20, 5)
def forward(self, x):
x = F.relu(self.conv1(x))
return F.relu(self.conv2(x))
Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.
.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.
| | Type | Default | Details |
|---|---|---|---|
| depth | int | 4 | depth of the UNet network |
| mult_chan | int | 32 | number of filters at first layer |
| in_channels | int | 1 | number of input channels |
| out_channels | int | 1 | number of output channels |
| last_activation | NoneType | None | last activation before final result |
| kernel_size | int | 3 | kernel size of convolutional layers |
| ndim | int | 2 | number of spatial dimensions of the input data |
| n_conv_per_depth | int | 2 | number of convolutions per layer |
| activation | str | ReLU | activation function used in convolutional layers |
| norm_type | NormType | NormType.Batch | normalization type for layers |
| dropout | float | 0.0 | dropout rate |
| pool | function | MaxPool | pooling layer type |
| pool_size | int | 2 | pooling size |
| residual | bool | False | use residual connection |
| prob_out | bool | False | output probability scale |
| eps_scale | float | 0.001 | epsilon for scale output |
# show_doc(UNet)

x = torch_randn(16, 1, 32, 64, 64)
xdim = len(x.shape)-2  # number of spatial dims of x (3 for a 5D tensor)
tst = MyUNet(depth=1, ndim=xdim, n_conv_per_depth=1, residual=True)  # smallest UNet config
mods = list(tst.children())  # top-level submodules, for inspection below
print(mods)
test_eq(tst(x).shape, [16, 1, 32, 64, 64])

[_Net_recurse(
(sub_conv_more): ConvLayer(
(0): Conv3d(1, 32, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False)
(1): BatchNorm3d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU()
)
(sub_u): Sequential(
(0): MaxPool3d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(1): _Net_recurse(
(sub_conv_more): ConvLayer(
(0): Conv3d(32, 64, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False)
(1): BatchNorm3d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU()
)
)
(2): Upsample(scale_factor=2.0, mode='nearest')
)
(sub_conv_less): ConvLayer(
(0): Conv3d(96, 32, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False)
(1): BatchNorm3d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU()
)
), ConvLayer(
(0): Conv3d(32, 1, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1))
)]
UNet
UNetUpBlock
UNetUpBlock (in_size, out_size, up_mode, padding, batch_norm)
Base class for all neural network modules.
Your models should also subclass this class.
Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 20, 5)
self.conv2 = nn.Conv2d(20, 20, 5)
def forward(self, x):
x = F.relu(self.conv1(x))
return F.relu(self.conv2(x))
Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.
.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.
:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool
UNetConvBlock
UNetConvBlock (in_size, out_size, padding, batch_norm, drop_p=0.15)
Base class for all neural network modules.
Your models should also subclass this class.
Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 20, 5)
self.conv2 = nn.Conv2d(20, 20, 5)
def forward(self, x):
x = F.relu(self.conv1(x))
return F.relu(self.conv2(x))
Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.
.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.
:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool
UNet
UNet (in_channels=1, n_classes=1, depth=5, wf=6, padding=True, batch_norm=True, up_mode='upconv', residual=True, drop_p=0.15)
Base class for all neural network modules.
Your models should also subclass this class.
Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes::
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 20, 5)
self.conv2 = nn.Conv2d(20, 20, 5)
def forward(self, x):
x = F.relu(self.conv1(x))
return F.relu(self.conv2(x))
Submodules assigned in this way will be registered, and will have their parameters converted too when you call :meth:to, etc.
.. note:: As per the example above, an __init__() call to the parent class must be made before assignment on the child.
:ivar training: Boolean represents whether this module is in training or evaluation mode. :vartype training: bool
# UNet smoke test on a 2D input; runs on GPU (requires CUDA to be available).
device = 'cuda'
x = torch_randn(16, 1, 64, 64, device=device)
xdim = len(x.shape)-2  # number of spatial dims (2 here)
tst = UNet(depth=1).to(device)  # shallowest UNet; model and input on the same device
mods = list(tst.children())  # top-level submodules, for inspection below
print(mods)
test_eq(tst(x).shape, [16, 1, 64, 64])

[ModuleList(
(0): UNetConvBlock(
(block): Sequential(
(0): Conv2d(1, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): ReLU()
(2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(4): ReLU()
(5): Dropout2d(p=0.15, inplace=False)
(6): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
), ModuleList(), Conv2d(64, 1, kernel_size=(1, 1), stride=(1, 1))]
ResNet 1D
ResidualBlock
ResidualBlock (features, context_features, activation=<function relu>, dropout_probability=0.0, use_batch_norm=False, zero_initialization=True)
A general-purpose residual block. Works only with 1-dim inputs.
ResidualNet
ResidualNet (in_features, out_features, hidden_features, context_features=None, num_blocks=2, activation=<function relu>, dropout_probability=0.0, use_batch_norm=False)
A general-purpose residual network. Works only with 1-dim inputs.
# ResidualNet smoke test with minimal feature sizes (in=out=hidden=1).
x = torch_randn(16, 1, 1, 1)
xdim = len(x.shape)-2  # computed for consistency with the other tests; unused below
tst = ResidualNet(1,1,1)
mods = list(tst.children())  # top-level submodules, for inspection below
print(mods)
assert tst(x).shape == x.size()

[Linear(in_features=1, out_features=1, bias=True), ModuleList(
(0-1): 2 x ResidualBlock(
(linear_layers): ModuleList(
(0-1): 2 x Linear(in_features=1, out_features=1, bias=True)
)
)
), Linear(in_features=1, out_features=1, bias=True)]