depthwise convolution (the number of channels does not change): for a 128x128x512 feature map, 512 separate 3x3 kernels are used, one per input channel, and the result is again a 512-channel feature map.
pointwise convolution: n 1x1 convolutions turn the 512-channel feature map above into a feature map with n channels.
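As a quick shape check of the two operations just described, here is a minimal Keras sketch; the 128x128x512 input size comes from the example above, while n = 256 output channels is an arbitrary illustrative value:

from keras.layers import Input, DepthwiseConv2D, Conv2D
from keras.models import Model

inp = Input(shape=(128, 128, 512))                  # 128x128 feature map, 512 channels
dw = DepthwiseConv2D((3, 3), padding='same')(inp)   # one 3x3 kernel per channel -> still 512 channels
pw = Conv2D(256, (1, 1))(dw)                        # n = 256 pointwise filters -> 256 channels
print(Model(inp, pw).output_shape)                  # (None, 128, 128, 256)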
The basic building block of MobileNet is the depthwise separable convolution, a structure that had already appeared in the Inception models. ==A depthwise separable convolution is a kind of factorized convolution: it decomposes a convolution into two smaller operations, a depthwise convolution and a pointwise convolution. A depthwise convolution differs from a standard convolution in that a standard convolution's kernels span all input channels, whereas a depthwise convolution uses a different kernel for each input channel, i.e. one kernel per input channel, which is why it is a depth-level operation.== The pointwise convolution is simply an ordinary convolution with a 1x1 kernel.
A depthwise separable convolution first applies a depthwise convolution to each input channel separately and then uses a pointwise convolution to combine the outputs. The overall effect is roughly that of a standard convolution, but the amount of computation and the number of parameters are greatly reduced.
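To see how large the savings are, here is a rough parameter count in plain Python for the example above (3x3 kernels, 512 input channels, and an assumed 256 output channels; biases ignored):

c_in, c_out, k = 512, 256, 3                  # c_out is an assumed value for illustration
standard = k * k * c_in * c_out               # standard 3x3 conv over all channels: 1,179,648 weights
depthwise = k * k * c_in                      # one 3x3 kernel per input channel: 4,608 weights
pointwise = c_in * c_out                      # 1x1 conv mixing channels: 131,072 weights
print(standard, depthwise + pointwise)        # 1179648 vs 135680
print((depthwise + pointwise) / standard)     # ~0.115, i.e. roughly 1/c_out + 1/k**2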
(1) It replaces standard convolutions with depthwise separable convolutions and drops pooling layers, downsampling instead with stride-2 convolutions.
(2) It factorizes a standard convolution into a depthwise convolution and a pointwise convolution, which greatly reduces both the parameter count and the computational cost.
(3) It exposes two hyper-parameters, a width multiplier and a resolution multiplier, to trade accuracy against speed; they are mainly used to shrink the model (a short sketch follows this list).
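The Keras and PyTorch implementations below do not expose these multipliers, but the width multiplier is easy to sketch: every layer's channel count is scaled by a factor alpha (the resolution multiplier simply shrinks the input image). The helper below is hypothetical and only illustrates the scaling:

def scaled_filters(filters, alpha=1.0):
    # width multiplier: keep a fraction alpha of each layer's channels (hypothetical helper)
    return max(1, int(filters * alpha))

print(scaled_filters(512, alpha=0.5))    # 256 channels instead of 512
print(scaled_filters(512, alpha=0.25))   # 128 channels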
# data preprocessing and learning rate schedule
import keras
from keras.datasets import cifar10
from keras.layers import (Input, Conv2D, DepthwiseConv2D, BatchNormalization,
                          Activation, GlobalAveragePooling2D, Dense)
from keras.models import Model
from keras import optimizers
from keras.callbacks import TensorBoard, LearningRateScheduler
from keras.preprocessing.image import ImageDataGenerator

# training hyper-parameters (values assumed, not given in the original text)
num_classes = 10
batch_size = 128
epochs = 250
iterations = 391          # roughly 50000 / batch_size
log_filepath = './mobilenet/'

def color_preprocessing(x_train, x_test):
    # per-channel normalization with the CIFAR-10 mean and std
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    mean = [125.307, 122.95, 113.865]
    std = [62.9932, 62.0887, 66.7048]
    for i in range(3):
        x_train[:, :, :, i] = (x_train[:, :, :, i] - mean[i]) / std[i]
        x_test[:, :, :, i] = (x_test[:, :, :, i] - mean[i]) / std[i]
    return x_train, x_test

def scheduler(epoch):
    if epoch < 100:
        return 0.01
    if epoch < 200:
        return 0.001
    return 0.0001

# load data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
x_train, x_test = color_preprocessing(x_train, x_test)
# define the network building block
def depthwise_separable(x, params):
    # params = ((s1,), (f2,)): s1 is the stride of the depthwise conv,
    # f2 is the number of pointwise (1x1) filters
    (s1, f2) = params
    x = DepthwiseConv2D((3, 3), strides=(s1[0], s1[0]), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(int(f2[0]), (1, 1), strides=(1, 1), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x
# build the network
def MobileNet(img_input, shallow=False, classes=10):
    """Instantiates a MobileNet for CIFAR-10 sized inputs.

    # Arguments
        img_input: input tensor.
        shallow: optional flag that makes the network smaller by
            skipping the five stride-1 blocks with 512 filters.
        classes: optional number of classes to classify images into.
    """
    x = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(img_input)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = depthwise_separable(x, params=[(1,), (64,)])
    x = depthwise_separable(x, params=[(2,), (128,)])
    x = depthwise_separable(x, params=[(1,), (128,)])
    x = depthwise_separable(x, params=[(2,), (256,)])
    x = depthwise_separable(x, params=[(1,), (256,)])
    x = depthwise_separable(x, params=[(2,), (512,)])
    if not shallow:
        for _ in range(5):
            x = depthwise_separable(x, params=[(1,), (512,)])
    x = depthwise_separable(x, params=[(2,), (1024,)])
    x = depthwise_separable(x, params=[(1,), (1024,)])
    x = GlobalAveragePooling2D()(x)
    out = Dense(classes, activation='softmax')(x)
    return out
# create the model
img_input = Input(shape=(32, 32, 3))
output = MobileNet(img_input)
model = Model(img_input, output)
model.summary()

# training setup
# set optimizer
sgd = optimizers.SGD(lr=.1, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

# set callbacks
tb_cb = TensorBoard(log_dir=log_filepath, histogram_freq=0)
change_lr = LearningRateScheduler(scheduler)
cbks = [change_lr, tb_cb]

# set data augmentation
datagen = ImageDataGenerator(horizontal_flip=True,
                             width_shift_range=0.125,
                             height_shift_range=0.125,
                             fill_mode='constant', cval=0.)
datagen.fit(x_train)

# start training
model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                    steps_per_epoch=iterations,
                    epochs=epochs,
                    callbacks=cbks,
                    validation_data=(x_test, y_test))
model.save('mobilenet.h5')
The same network can also be written in PyTorch; the Block module below is one depthwise-separable unit.

import torch
import torch.nn as nn
import torch.nn.functional as F

class Block(nn.Module):
    '''Depthwise conv + Pointwise conv'''
    def __init__(self, in_planes, out_planes, stride=1):
        super(Block, self).__init__()
        # depthwise: groups=in_planes means one 3x3 kernel per input channel
        self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3,
                               stride=stride, padding=1,
                               groups=in_planes, bias=False)
        self.bn1 = nn.BatchNorm2d(in_planes)
        # pointwise: 1x1 convolution that mixes the channels
        self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                               stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        return out
class MobileNet(nn.Module):
    # (128,2) means conv planes=128, conv stride=2;
    # by default conv stride=1
    cfg = [64, (128,2), 128, (256,2), 256, (512,2),
           512, 512, 512, 512, 512, (1024,2), 1024]

    def __init__(self, num_classes=10):
        super(MobileNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_planes=32)
        self.linear = nn.Linear(1024, num_classes)

    def _make_layers(self, in_planes):
        layers = []
        for x in self.cfg:
            out_planes = x if isinstance(x, int) else x[0]
            stride = 1 if isinstance(x, int) else x[1]
            layers.append(Block(in_planes, out_planes, stride))
            in_planes = out_planes
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layers(out)
        out = F.avg_pool2d(out, 2)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def test():
    net = MobileNet()
    x = torch.randn(1, 3, 32, 32)
    y = net(x)
    print(y.size())   # expected: torch.Size([1, 10])

test()