I'm trying to implement Res2Net in a different way, following how fastai wrote their xresnet. The Res2block I have is:
import math

import torch
import torch.nn as nn

# assumes a conv_layer helper in the style of fastai's xresnet:
# conv_layer(ni, nf, ks, stride, act) -> Conv2d -> BatchNorm2d -> (act_fn if act)


def no_op(x): return x  # no operation, used when ni == nf


class Res2block(nn.Module):
    def __init__(self, expansion, ni, nh, stride = 1, base_width = 26, scale = 4, first_block = False):
        """
        expansion: bottleneck expansion factor
        ni: number of in channels
        nh: number of hidden channels
        base_width: basic width of the 3x3 convs
        scale: scaling ratio for the convs
        first_block: whether the block is the first to be placed in the conv layer
        """
        super(Res2block, self).__init__()
        self.first_block = first_block
        self.scale = scale
        nf, ni = nh * expansion, ni * expansion
        width = int(math.floor(nf * (base_width / 64.)))
        # print(width)
        self.conv1 = conv_layer(ni, width * scale, 1, stride = stride)
        # print(ni, width*scale)
        self.conv3 = conv_layer(width * scale, nh * expansion, kernel_size = 1, act = False)  # no act_fn
        n_branches = max(2, scale) - 1
        if self.first_block:
            self.pool = nn.AvgPool2d(kernel_size = 3, stride = stride, padding = 1)
        self.convs = nn.ModuleList([conv_layer(width, width, 3, stride = stride) for _ in range(n_branches)])
        # if ni != nf, use a 1x1 conv to get the same channels, otherwise return x (no operation)
        self.idconv = no_op if ni == nf else conv_layer(ni, nf, 1, act = False)
        self.pooling = no_op if stride == 1 else nn.AvgPool2d(2, ceil_mode = True)

    def forward(self, x):
        x1 = self.conv1(x)  # conv2d 1x1 -> bn -> act_fn
        # split into self.scale equal-sized chunks along the channel dim
        xs = torch.chunk(x1, self.scale, dim = 1)
        # running output, accumulated and concatenated chunk by chunk
        y = 0
        for idx, conv in enumerate(self.convs):
            # print(self.pooling(xs[idx]).shape)
            # xs[idx] = self.pooling(xs[idx])
            # temp = self.pooling(xs[idx])
            if self.first_block:
                y = xs[idx]
                """
                Something needs to be fixed here for when stride != 1
                """
            else:
                print('idx', idx, 'xs[idx].shape', xs[idx].shape)
                if idx > 0:
                    print('idx', idx, 'y shape', y.shape)
                # y += self.idconv(self.pooling(xs[idx]))  # add the residual for the 2nd and onwards chunks
                # print('pooled x[idx]', self.pooling(xs[idx]).shape)
                y += xs[idx]
                # y += self.pooling(xs[idx])
            y = conv(y)
            x1 = torch.cat((x1, y), 1) if idx > 0 else y  # concat the outputs, but not for the 1st chunk
        if self.scale > 1:
            # the last chunk is passed through untouched (pooled in the first block)
            if self.first_block:
                x1 = torch.cat((x1, self.pool(xs[len(self.convs)])), 1)  # concat all the outputs together
            else:
                x1 = torch.cat((x1, xs[len(self.convs)]), 1)
        x1 = self.conv3(x1)  # conv1x1 -> bn -> no act_fn
        # compute the residual, changing nf or dimensions if not matching x1
        x2 = self.idconv(self.pooling(x))
        out = x1 + x2
        return out
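For reference, I'm exercising the block with a cell like the one in the traceback below. A minimal sketch of it (the expansion = 4 and the 226x226 input are representative guesses; expansion = 4 gives width = floor(20*4 * 26/64) = 32, which matches the 32-channel chunks printed further down):

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
expansion = 4  # a guess; it reproduces the chunk shapes below
tmp = torch.randn(16, 3 * expansion, 226, 226).to(device)  # input size is also a guess

a2 = Res2block(expansion, 3, 20, stride = 2).to(device)
print(a2(tmp).shape)  # RuntimeError when stride = 2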
I'm getting an error from the Res2block when stride > 1:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-37-511cce144281> in <module>()
12
13 a2 = Res2block(expansion, 3, 20, stride = 2).to(device)
---> 14 print(a2(tmp).shape)
15 bs, n_channels, H, W = a2(tmp).size()
16 print(H, W, H*W)
1 frames
<ipython-input-32-0b935b01f069> in forward(self, x)
68 # y += self.idconv(self.pooling(xs[idx])) # add the residual for the 2nd and onwards chunks
69 # print('pooled x[idx]', self.pooling(xs[idx]).shape)
---> 70 y += xs[idx]
71 # y += self.pooling(xs[idx])
72
RuntimeError: The size of tensor a (57) must match the size of tensor b (113) at non-singleton dimension 3
I ran a check of the dimensions and got:
idx 0 xs[idx].shape torch.Size([16, 32, 113, 113])
idx 1 xs[idx].shape torch.Size([16, 32, 113, 113])
idx 1 y shape torch.Size([16, 32, 57, 57])
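The 57 looks like exactly what a stride-2 3x3 conv with padding 1 does to a 113x113 chunk: floor((113 + 2 - 3)/2) + 1 = 57. A quick standalone check of that (assuming conv_layer's 3x3 convs use padding = 1):

import torch
import torch.nn as nn

# the branch convs carry stride = 2, so after y = conv(y) the running
# output is 57x57 and no longer matches the next 113x113 chunk that
# y += xs[idx] tries to add
conv = nn.Conv2d(32, 32, kernel_size = 3, stride = 2, padding = 1)
y = conv(torch.randn(16, 32, 113, 113))
print(y.shape)  # torch.Size([16, 32, 57, 57])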
Do you know what might be causing this?