【深度学习】用caffe+ShuffleNet-V2做回归
重要参考来源
用到的github源码
caffe
shuffle_channel_layer
conv_dw_layer
convert_imageset.cpp代码详解
功能:将数据集制作成lmdb或者leveldb数据
目前caffe原始的convert_imageset只支持单分类任务
// This program converts a set of images to a lmdb/leveldb by storing them
// as Datum proto buffers.
// Usage:
// convert_imageset [FLAGS] ROOTFOLDER/ LISTFILE DB_NAME
//
// where ROOTFOLDER is the root folder that holds all the images, and LISTFILE
// should be a list of files as well as their labels, in the format as
// subfolder1/file1.JPEG 7
// ....
#include <algorithm>
#include <fstream> // NOLINT(readability/streams)
#include <string>
#include <utility>
#include <vector>
#include "boost/scoped_ptr.hpp"
#include "gflags/gflags.h"
#include "glog/logging.h"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/db.hpp"
#include "caffe/util/format.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/rng.hpp"
using namespace caffe; // NOLINT(build/namespaces)
using std::pair;
using boost::scoped_ptr;
// Command-line flags (gflags). Each DEFINE_* registers a FLAGS_<name>
// variable with a default value and help text.
DEFINE_bool(gray, false,
    "When this option is on, treat images as grayscale ones");
// If true, load images as grayscale; default false, i.e. load as BGR.
DEFINE_bool(shuffle, false,
    "Randomly shuffle the order of images and their labels");
// If true, randomly shuffle the dataset order; default false (no shuffle).
DEFINE_string(backend, "lmdb",
    "The backend {lmdb, leveldb} for storing the result");
// Storage backend for the output database; default "lmdb".
DEFINE_int32(resize_width, 0, "Width images are resized to");
// Target resize width; 0 (default) keeps the original width.
DEFINE_int32(resize_height, 0, "Height images are resized to");
// Target resize height; 0 (default) keeps the original height.
DEFINE_bool(check_size, false,
    "When this option is on, check that all the datum have the same size");
// If true, verify that every datum has the same byte size; default false.
DEFINE_bool(encoded, false,
    "When this option is on, the encoded image will be save in datum");
// If true, store the encoded (compressed) image bytes in the datum.
DEFINE_string(encode_type, "",
    "Optional: What type should we encode the image as ('png','jpg',...).");
// Encoding format ("png", "jpg", ...); default "" means no encoding.
// Converts a list of (image path, integer label) pairs into an lmdb/leveldb
// database of serialized Datum protos.
//   argv[1] root folder prepended to every image path
//   argv[2] list file: one "subdir/image.jpg label" line per image
//   argv[3] output database path
// Returns 1 on bad usage, 0 otherwise.
int main(int argc, char** argv) {
#ifdef USE_OPENCV
  ::google::InitGoogleLogging(argv[0]);
  // Also print all log levels to stderr, not just the log files.
  FLAGS_alsologtostderr = 1;

#ifndef GFLAGS_GFLAGS_H_
  // Older gflags lived in the google:: namespace.
  namespace gflags = google;
#endif

  gflags::SetUsageMessage("Convert a set of images to the leveldb/lmdb\n"
        "format used as input for Caffe.\n"
        "Usage:\n"
        " convert_imageset [FLAGS] ROOTFOLDER/ LISTFILE DB_NAME\n"
        "The ImageNet dataset for the training demo is at\n"
        " http://www.image-net.org/download-images\n");
  gflags::ParseCommandLineFlags(&argc, &argv, true);

  if (argc < 4) {
    // Need the program name plus three positional arguments.
    gflags::ShowUsageWithFlagsRestrict(argv[0], "tools/convert_imageset");
    return 1;
  }

  const bool is_color = !FLAGS_gray;        // BGR unless --gray
  const bool check_size = FLAGS_check_size;
  const bool encoded = FLAGS_encoded;
  const string encode_type = FLAGS_encode_type;

  // Parse the list file: the label is everything after the LAST space,
  // the image path is everything before it.
  std::ifstream infile(argv[2]);
  std::vector<std::pair<std::string, int> > lines;
  std::string line;
  size_t pos;
  int label;
  while (std::getline(infile, line)) {
    pos = line.find_last_of(' ');
    label = atoi(line.substr(pos + 1).c_str());
    lines.push_back(std::make_pair(line.substr(0, pos), label));
  }
  if (FLAGS_shuffle) {
    // Randomly shuffle the (path, label) pairs (caffe's fisher-yates shuffle).
    LOG(INFO) << "Shuffling data";
    shuffle(lines.begin(), lines.end());
  }
  LOG(INFO) << "A total of " << lines.size() << " images.";

  if (encode_type.size() && !encoded)
    LOG(INFO) << "encode_type specified, assuming encoded=true.";

  // Negative flag values are clamped to 0 (= keep original size).
  int resize_height = std::max<int>(0, FLAGS_resize_height);
  int resize_width = std::max<int>(0, FLAGS_resize_width);

  // Create the output DB (lmdb or leveldb) and the first write transaction.
  scoped_ptr<db::DB> db(db::GetDB(FLAGS_backend));
  db->Open(argv[3], db::NEW);
  scoped_ptr<db::Transaction> txn(db->NewTransaction());

  std::string root_folder(argv[1]);
  Datum datum;
  int count = 0;
  int data_size = 0;
  bool data_size_initialized = false;

  for (int line_id = 0; line_id < lines.size(); ++line_id) {
    bool status;
    std::string enc = encode_type;
    if (encoded && !enc.size()) {
      // Guess the encoding type from the file name extension.
      string fn = lines[line_id].first;
      size_t p = fn.rfind('.');
      if (p == fn.npos) {
        // No extension: leave enc empty (raw-pixel path) instead of crashing.
        LOG(WARNING) << "Failed to guess the encoding of '" << fn << "'";
      } else {
        // BUGFIX: use p + 1 so the dot is excluded; matchExt() and
        // cv::imencode("." + enc, ...) both expect "jpg", not ".jpg".
        // The previous substr(p) also threw std::out_of_range when p == npos.
        enc = fn.substr(p + 1);
        std::transform(enc.begin(), enc.end(), enc.begin(), ::tolower);
      }
    }
    status = ReadImageToDatum(root_folder + lines[line_id].first,
        lines[line_id].second, resize_height, resize_width, is_color,
        enc, &datum);
    if (status == false) continue;  // unreadable image: skip it
    if (check_size) {
      // Verify every datum matches the byte size of the first one.
      if (!data_size_initialized) {
        data_size = datum.channels() * datum.height() * datum.width();
        data_size_initialized = true;
      } else {
        const std::string& data = datum.data();
        CHECK_EQ(data.size(), data_size) << "Incorrect data field size "
            << data.size();
      }
    }
    // Key: zero-padded sequential id + path keeps DB iteration ordered.
    string key_str = caffe::format_int(line_id, 8) + "_" + lines[line_id].first;

    // Serialize the datum and stage it in the current transaction.
    string out;
    CHECK(datum.SerializeToString(&out));
    txn->Put(key_str, out);

    if (++count % 1000 == 0) {
      // Commit every 1000 items to bound the transaction size.
      txn->Commit();
      txn.reset(db->NewTransaction());
      LOG(INFO) << "Processed " << count << " files.";
    }
  }
  // Commit the last partial batch.
  if (count % 1000 != 0) {
    txn->Commit();
    LOG(INFO) << "Processed " << count << " files.";
  }
#else
  LOG(FATAL) << "This tool requires OpenCV; compile with USE_OPENCV.";
#endif  // USE_OPENCV
  return 0;
}
ReadImageToDatum等相关函数代码详解
功能:读取图片,并将图片和label读入Datum
#ifdef USE_OPENCV
// Loads an image from disk, optionally converting to grayscale and/or
// resizing. Returns an empty cv::Mat (data == NULL) when decoding fails.
cv::Mat ReadImageToCVMat(const string& filename,
    const int height, const int width, const bool is_color) {
  // 3-channel BGR or single-channel grayscale depending on is_color.
  const int cv_read_flag = is_color ? CV_LOAD_IMAGE_COLOR
                                    : CV_LOAD_IMAGE_GRAYSCALE;
  cv::Mat cv_img_origin = cv::imread(filename, cv_read_flag);
  if (!cv_img_origin.data) {
    // Decoding failed: return the empty Mat so callers can detect the error.
    LOG(ERROR) << "Could not open or find file " << filename;
    return cv_img_origin;
  }
  // Resize only when both target dimensions are positive; otherwise keep
  // the original size.
  if (height <= 0 || width <= 0) {
    return cv_img_origin;
  }
  cv::Mat cv_img;
  cv::resize(cv_img_origin, cv_img, cv::Size(width, height));
  return cv_img;
}
// Overload: color loading enabled by default.
cv::Mat ReadImageToCVMat(const string& filename,
    const int height, const int width) {
  const bool load_color = true;
  return ReadImageToCVMat(filename, height, width, load_color);
}
// is_color默认是True
// Overload: keep the original image size (no resize).
cv::Mat ReadImageToCVMat(const string& filename,
    const bool is_color) {
  const int keep_height = 0;
  const int keep_width = 0;
  return ReadImageToCVMat(filename, keep_height, keep_width, is_color);
}
// height和width默认是0,也就是保留原图尺寸
// Overload: color loading, original size.
cv::Mat ReadImageToCVMat(const string& filename) {
  return ReadImageToCVMat(filename, /*height=*/0, /*width=*/0,
                          /*is_color=*/true);
}
// Case-insensitively tests whether the file name's extension matches the
// requested encoding. "jpg" and "jpeg" are treated as the same format.
// If the name has no '.', the whole name is compared instead.
static bool matchExt(const std::string & fn,
    std::string en) {
  const size_t dot = fn.rfind('.');
  std::string ext = (dot == std::string::npos) ? fn : fn.substr(dot + 1);
  // Lower-case both sides for a case-insensitive comparison.
  for (size_t i = 0; i < ext.size(); ++i) ext[i] = ::tolower(ext[i]);
  for (size_t i = 0; i < en.size(); ++i) en[i] = ::tolower(en[i]);
  if (ext == en)
    return true;
  return en == "jpg" && ext == "jpeg";
}
bool ReadImageToDatum(const string& filename, const int label,
const int height, const int width, const bool is_color,
const std::string & encoding, Datum* datum) {
// 第一个参数:图片完整路径
// 第二个参数:图片label
// 第三个参数:0或height
// 第四个参数:0或width
// 第五个参数:按BGR处理还是按GRAY处理
// 第六个参数:编码格式
// 第七个参数:datum
cv::Mat cv_img = ReadImageToCVMat(filename, height, width, is_color);
// 调用函数ReadImageToCVMat,得到图片
if (cv_img.data) {
if (encoding.size()) {
// 如果编码格式不是""
if ( (cv_img.channels() == 3) == is_color && !height && !width &&
matchExt(filename, encoding) )
// 如果(三通道,且is_color为True),并且(height和width都是零),并且(matchExt(filename, encoding)是True)
return ReadFileToDatum(filename, label, datum);
// 调用ReadFileToDatum函数
std::vector<uchar> buf;
cv::imencode("."+encoding, cv_img, buf);
// 将cv::Mat数据编码成数据流
datum->set_data(std::string(reinterpret_cast<char*>(&buf[0]),
buf.size()));
datum->set_label(label);
datum->set_encoded(true);
return true;
// 返回True
}
// 如果没有编码格式是"",调用函数CVMatToDatum
CVMatToDatum(cv_img, datum);
datum->set_label(label);
return true;
// 返回True
} else {
return false;
// 如果没读到图,返回False
}
}
#endif // USE_OPENCV
// Reads the raw bytes of a file into `datum` (marked encoded) together with
// a single integer label. Returns false when the file cannot be opened.
bool ReadFileToDatum(const string& filename, const int label,
    Datum* datum) {
  // Open at the end (ios::ate) so tellg() immediately yields the file size.
  fstream file(filename.c_str(), ios::in|ios::binary|ios::ate);
  if (!file.is_open()) {
    return false;
  }
  std::streampos size = file.tellg();
  std::string buffer(size, ' ');
  // Rewind to the beginning and slurp the whole file.
  file.seekg(0, ios::beg);
  file.read(&buffer[0], size);
  file.close();
  datum->set_data(buffer);
  datum->set_label(label);
  datum->set_encoded(true);
  return true;
}
// Copies the raw pixel bytes of an 8-bit cv::Mat into `datum`, converting
// from OpenCV's per-row interleaved-channel layout to Caffe's channel-major
// (c, h, w) layout. Does not touch the label fields.
void CVMatToDatum(const cv::Mat& cv_img, Datum* datum) {
  CHECK(cv_img.depth() == CV_8U) << "Image data type must be unsigned byte";
  datum->set_channels(cv_img.channels());
  datum->set_height(cv_img.rows);
  datum->set_width(cv_img.cols);
  // Raw bytes go in data; drop any stale contents and mark as not encoded.
  datum->clear_data();
  datum->clear_float_data();
  datum->set_encoded(false);
  int datum_channels = datum->channels();
  int datum_height = datum->height();
  int datum_width = datum->width();
  int datum_size = datum_channels * datum_height * datum_width;
  std::string buffer(datum_size, ' ');
  for (int h = 0; h < datum_height; ++h) {
    // Row pointer; within a row, pixel channels are interleaved.
    const uchar* ptr = cv_img.ptr<uchar>(h);
    int img_index = 0;
    for (int w = 0; w < datum_width; ++w) {
      for (int c = 0; c < datum_channels; ++c) {
        // Destination index in channel-major order: (c * H + h) * W + w.
        int datum_index = (c * datum_height + h) * datum_width + w;
        buffer[datum_index] = static_cast<char>(ptr[img_index++]);
      }
    }
  }
  datum->set_data(buffer);
}
convert_imageset.cpp修改思路
因为回归任务回归几个浮点值,所以文件应该由原来的格式:图片路径[空格]label,变为:图片路径[空格]浮点数1[空格]浮点数2[空格]浮点数3[空格]浮点数4[空格]…
所以处理文件以及将label(这里是回归的浮点数)读入Datum部分要做相应修改
// This program converts a set of images to a lmdb/leveldb by storing them
// as Datum proto buffers.
// Usage:
// convert_imageset [FLAGS] ROOTFOLDER/ LISTFILE DB_NAME
//
// where ROOTFOLDER is the root folder that holds all the images, and LISTFILE
// should be a list of files as well as their labels, in the format as
// subfolder1/file1.JPEG 7
// ....
#include <algorithm>
#include <fstream> // NOLINT(readability/streams)
#include <string>
#include <utility>
#include <vector>
#include "boost/scoped_ptr.hpp"
#include "gflags/gflags.h"
#include "glog/logging.h"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/db.hpp"
#include "caffe/util/format.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/rng.hpp"
#include <boost/tokenizer.hpp>
// 使用tokenizer
using namespace caffe; // NOLINT(build/namespaces)
using std::pair;
using boost::scoped_ptr;
// Command-line flags, identical to those of the stock convert_imageset tool
// (gray / shuffle / backend / resize_* / check_size / encoded / encode_type).
DEFINE_bool(gray, false,
    "When this option is on, treat images as grayscale ones");
DEFINE_bool(shuffle, false,
    "Randomly shuffle the order of images and their labels");
DEFINE_string(backend, "lmdb",
    "The backend {lmdb, leveldb} for storing the result");
DEFINE_int32(resize_width, 0, "Width images are resized to");
DEFINE_int32(resize_height, 0, "Height images are resized to");
DEFINE_bool(check_size, false,
    "When this option is on, check that all the datum have the same size");
DEFINE_bool(encoded, false,
    "When this option is on, the encoded image will be save in datum");
DEFINE_string(encode_type, "",
    "Optional: What type should we encode the image as ('png','jpg',...).");
// Regression variant of convert_imageset: each LISTFILE line is
//   image_path float1 float2 float3 ...
// and the float targets are stored in Datum::float_data instead of the
// single integer label field.
int main(int argc, char** argv) {
#ifdef USE_OPENCV
  ::google::InitGoogleLogging(argv[0]);
  // Print output to stderr (while still logging)
  FLAGS_alsologtostderr = 1;

#ifndef GFLAGS_GFLAGS_H_
  namespace gflags = google;
#endif

  gflags::SetUsageMessage("Convert a set of images to the leveldb/lmdb\n"
        "format used as input for Caffe.\n"
        "Usage:\n"
        " convert_imageset [FLAGS] ROOTFOLDER/ LISTFILE DB_NAME\n"
        "The ImageNet dataset for the training demo is at\n"
        " http://www.image-net.org/download-images\n");
  gflags::ParseCommandLineFlags(&argc, &argv, true);

  if (argc < 4) {
    gflags::ShowUsageWithFlagsRestrict(argv[0], "tools/convert_imageset");
    return 1;
  }

  const bool is_color = !FLAGS_gray;
  const bool check_size = FLAGS_check_size;
  const bool encoded = FLAGS_encoded;
  const string encode_type = FLAGS_encode_type;

  std::ifstream infile(argv[2]);
  // std::vector<std::pair<std::string, int> > lines;
  // Each entry pairs one image path with its vector of float targets.
  std::vector<std::pair<std::string, std::vector<float> > > lines;
  std::string line;
  // size_t pos;
  // int label;
  // Reused across lines: the float targets parsed from the current line.
  std::vector<float> labels;
  while (std::getline(infile, line)) {
    // pos = line.find_last_of(' ');
    // label = atoi(line.substr(pos + 1).c_str());
    // lines.push_back(std::make_pair(line.substr(0, pos), label));
    std::vector<std::string> tokens;
    // Split the line on spaces: token 0 is the path, the rest are floats.
    boost::char_separator<char> sep(" ");
    boost::tokenizer<boost::char_separator<char> > tok(line, sep);
    tokens.clear();
    std::copy(tok.begin(), tok.end(), std::back_inserter(tokens));
    // Tokens 1..N-1 are the regression targets.
    for (int i = 1; i < tokens.size(); ++i)
    {
      labels.push_back(atof(tokens.at(i).c_str()));
    }
    lines.push_back(std::make_pair(tokens.at(0), labels));
    // Reset for the next line.
    labels.clear();
  }
  if (FLAGS_shuffle) {
    // randomly shuffle data
    LOG(INFO) << "Shuffling data";
    shuffle(lines.begin(), lines.end());
  }
  LOG(INFO) << "A total of " << lines.size() << " images.";

  if (encode_type.size() && !encoded)
    LOG(INFO) << "encode_type specified, assuming encoded=true.";

  int resize_height = std::max<int>(0, FLAGS_resize_height);
  int resize_width = std::max<int>(0, FLAGS_resize_width);

  // Create new DB
  scoped_ptr<db::DB> db(db::GetDB(FLAGS_backend));
  db->Open(argv[3], db::NEW);
  scoped_ptr<db::Transaction> txn(db->NewTransaction());

  // Storing to db
  std::string root_folder(argv[1]);
  Datum datum;
  int count = 0;
  int data_size = 0;
  bool data_size_initialized = false;

  for (int line_id = 0; line_id < lines.size(); ++line_id) {
    bool status;
    std::string enc = encode_type;
    if (encoded && !enc.size()) {
      // Guess the encoding type from the file name
      string fn = lines[line_id].first;
      size_t p = fn.rfind('.');
      if ( p == fn.npos )
        LOG(WARNING) << "Failed to guess the encoding of '" << fn << "'";
      enc = fn.substr(p+1);
      std::transform(enc.begin(), enc.end(), enc.begin(), ::tolower);
    }
    // The second argument is now the vector of float labels, so this calls
    // the ReadImageToDatum overload added for regression.
    status = ReadImageToDatum(root_folder + lines[line_id].first,
        lines[line_id].second, resize_height, resize_width, is_color,
        enc, &datum);
    if (status == false) continue;
    if (check_size) {
      // Verify every datum matches the byte size of the first one.
      if (!data_size_initialized) {
        data_size = datum.channels() * datum.height() * datum.width();
        data_size_initialized = true;
      } else {
        const std::string& data = datum.data();
        CHECK_EQ(data.size(), data_size) << "Incorrect data field size "
            << data.size();
      }
    }
    // sequential
    string key_str = caffe::format_int(line_id, 8) + "_" + lines[line_id].first;

    // Put in db
    string out;
    CHECK(datum.SerializeToString(&out));
    txn->Put(key_str, out);

    if (++count % 1000 == 0) {
      // Commit db
      txn->Commit();
      txn.reset(db->NewTransaction());
      LOG(INFO) << "Processed " << count << " files.";
    }
  }
  // write the last batch
  if (count % 1000 != 0) {
    txn->Commit();
    LOG(INFO) << "Processed " << count << " files.";
  }
#else
  LOG(FATAL) << "This tool requires OpenCV; compile with USE_OPENCV.";
#endif  // USE_OPENCV
  return 0;
}
ReadImageToDatum等相关函数修改思路
ReadImageToDatum函数的第二个参数从一个整型的label变成了一个浮点型的vector
所以,io.hpp中相关的形参类型说明也要修改
可以不改动原来的函数声明(因为C++支持函数重载,这里指参数有所不同),而在它的下面接上:
bool ReadImageToDatum(const string& filename, const vector<float> labels,
const int height, const int width, const bool is_color,
const std::string & encoding, Datum* datum);
在bool ReadFileToDatum函数声明下面添加:
bool ReadFileToDatum(const string& filename, const vector<float> labels, Datum* datum);
io.cpp中在ReadImageToDatum函数下面进行添加:
// Regression variant of ReadImageToDatum: stores the image in `datum` and
// writes each entry of `labels` into Datum::float_data instead of the
// single integer label field. Returns false when the image cannot be read.
bool ReadImageToDatum(const string& filename, const vector<float> labels,
    const int height, const int width, const bool is_color,
    const std::string & encoding, Datum* datum) {
  cv::Mat cv_img = ReadImageToCVMat(filename, height, width, is_color);
  if (cv_img.data) {
    if (encoding.size()) {
      // If the on-disk file already matches the requested encoding and no
      // resize/color conversion is needed, copy its raw bytes directly.
      if ( (cv_img.channels() == 3) == is_color && !height && !width &&
          matchExt(filename, encoding) )
        return ReadFileToDatum(filename, labels, datum);
      // Otherwise re-encode the decoded image into the requested format.
      std::vector<uchar> buf;
      cv::imencode("."+encoding, cv_img, buf);
      datum->set_data(std::string(reinterpret_cast<char*>(&buf[0]),
                      buf.size()));
      // datum->set_label(label);
      // Store every regression target in float_data.
      for (int i = 0; i < labels.size(); ++i)
      {
        datum->add_float_data(labels.at(i));
      }
      datum->set_encoded(true);
      return true;
    }
    // No encoding requested: store raw pixel bytes via CVMatToDatum.
    CVMatToDatum(cv_img, datum);
    // datum->set_label(label);
    // Store every regression target in float_data.
    for (int i = 0; i < labels.size(); ++i)
    {
      datum->add_float_data(labels.at(i));
    }
    return true;
  } else {
    return false;  // image could not be read
  }
}
io.cpp中在ReadFileToDatum函数下面进行添加:
// Regression variant of ReadFileToDatum: reads the file's raw bytes into
// `datum` (marked encoded) and stores `labels` in Datum::float_data.
// Returns false when the file cannot be opened.
bool ReadFileToDatum(const string& filename, const vector<float> labels,
    Datum* datum) {
  std::streampos size;
  // Open at the end (ios::ate) so tellg() gives the file size directly.
  fstream file(filename.c_str(), ios::in|ios::binary|ios::ate);
  if (file.is_open()) {
    size = file.tellg();
    std::string buffer(size, ' ');
    file.seekg(0, ios::beg);  // rewind before reading the whole file
    file.read(&buffer[0], size);
    file.close();
    datum->set_data(buffer);
    // datum->set_label(label);
    // Store every regression target in float_data.
    for (int i = 0; i < labels.size(); ++i)
    {
      datum->add_float_data(labels.at(i));
    }
    datum->set_encoded(true);
    return true;
  } else {
    return false;  // file could not be opened
  }
}
下面内容转载自用caffe做回归(上)
datum->add_float_data(labels.at(i));
这个函数是怎么来的,第一次用的时候怎么会知道有这个函数?
这就得来看看caffe.proto文件了,里面关于Datum的代码如下:
message Datum {
optional int32 channels = 1;
optional int32 height = 2;
optional int32 width = 3;
// the actual image data, in bytes
optional bytes data = 4;
optional int32 label = 5;
// Optionally, the datum could also hold float data.
repeated float float_data = 6;
// If true data contains an encoded image that need to be decoded
optional bool encoded = 7 [default = false];
}
.proto文件是Google开发的一种协议接口,根据这个,可以自动生成caffe.pb.h和caffe.pb.cc文件
==
optional int32 label = 5;
这条就是用于做分类的
==
repeated float float_data = 6;
这条就是用来做回归的
==
在caffe.pb.h文件中可以找到关于做回归的这部分自动生成的代码:
// repeated float float_data = 6;
int float_data_size() const;
void clear_float_data();
static const int kFloatDataFieldNumber = 6;
float float_data(int index) const;
void set_float_data(int index, float value);
void add_float_data(float value);
const ::google::protobuf::RepeatedField< float >&
float_data() const;
::google::protobuf::RepeatedField< float >*
mutable_float_data();
data_layer.cpp代码及修改思路
功能:从lmdb或leveldb中读取图片信息,先是反序列化成Datum,然后再放进Blob中
#ifdef USE_OPENCV
#include <opencv2/core/core.hpp>
#endif // USE_OPENCV
#include <stdint.h>
#include <vector>
#include "caffe/data_transformer.hpp"
#include "caffe/layers/data_layer.hpp"
#include "caffe/util/benchmark.hpp"
namespace caffe {
template <typename Dtype>
DataLayer<Dtype>::DataLayer(const LayerParameter& param)
  : BasePrefetchingDataLayer<Dtype>(param),
    offset_() {
  // Open the backing DB (lmdb/leveldb per data_param) read-only and
  // position a cursor at the first record.
  db_.reset(db::GetDB(param.data_param().backend()));
  db_->Open(param.data_param().source(), db::READ);
  cursor_.reset(db_->NewCursor());
}
template <typename Dtype>
DataLayer<Dtype>::~DataLayer() {
  // Stop the prefetch thread before members are torn down.
  this->StopInternalThread();
}
// Shapes the data and label top blobs. Modified for regression: the label
// blob is batch_size x labelNum x 1 x 1 instead of a flat batch_size vector.
template <typename Dtype>
void DataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const int batch_size = this->layer_param_.data_param().batch_size();
  // Read a data point, and use it to initialize the top blob.
  Datum datum;
  datum.ParseFromString(cursor_->value());
  // Use data_transformer to infer the expected blob shape from datum.
  vector<int> top_shape = this->data_transformer_->InferBlobShape(datum);
  this->transformed_data_.Reshape(top_shape);
  // Reshape top[0] and prefetch_data according to the batch_size.
  top_shape[0] = batch_size;
  top[0]->Reshape(top_shape);
  for (int i = 0; i < this->prefetch_.size(); ++i) {
    this->prefetch_[i]->data_.Reshape(top_shape);
  }
  LOG_IF(INFO, Caffe::root_solver())
      << "output data size: " << top[0]->num() << ","
      << top[0]->channels() << "," << top[0]->height() << ","
      << top[0]->width();
  // label
  // Original single-label (classification) shaping, kept for reference:
  /*
  if (this->output_labels_) {
    vector<int> label_shape(1, batch_size);
    top[1]->Reshape(label_shape);
    for (int i = 0; i < this->prefetch_.size(); ++i) {
      this->prefetch_[i]->label_.Reshape(label_shape);
    }
  }
  */
  // Number of regression targets per image.
  // NOTE(review): hard-coded to 4 here and again in load_batch(); the two
  // must stay in sync (ideally this would come from a layer parameter).
  int labelNum = 4;
  if (this->output_labels_) {
    vector<int> label_shape;
    // The four pushed values correspond to num, channels, height, width:
    // top[1] holds the labels, so num = batch_size, channels = labelNum,
    // and height = width = 1.
    label_shape.push_back(batch_size);
    label_shape.push_back(labelNum);
    label_shape.push_back(1);
    label_shape.push_back(1);
    top[1]->Reshape(label_shape);
    for (int i = 0; i < this->prefetch_.size(); ++i) {
      this->prefetch_[i]->label_.Reshape(label_shape);
    }
  }
}
// Returns true when the current record should be skipped: with multiple
// solvers, each rank consumes every size-th datum. In TEST phase only rank 0
// runs, so nothing is ever skipped there.
template <typename Dtype>
bool DataLayer<Dtype>::Skip() {
  const int size = Caffe::solver_count();
  const int rank = Caffe::solver_rank();
  if (this->layer_param_.phase() == TEST) {
    return false;  // test mode: keep every datum
  }
  return (offset_ % size) != rank;
}
template<typename Dtype>
void DataLayer<Dtype>::Next() {
  // Advance the DB cursor; wrap around to the first record once the end
  // of the dataset is reached.
  cursor_->Next();
  if (!cursor_->valid()) {
    LOG_IF(INFO, Caffe::root_solver())
        << "Restarting data prefetching from start.";
    cursor_->SeekToFirst();
  }
  offset_++;
}
// This function is called on prefetch thread.
// Fills one Batch: decodes/transforms batch_size images into batch->data_
// and copies each image's labelNum float regression targets into
// batch->label_.
template<typename Dtype>
void DataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(batch->data_.count());
  CHECK(this->transformed_data_.count());
  const int batch_size = this->layer_param_.data_param().batch_size();

  Datum datum;
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    timer.Start();
    while (Skip()) {
      Next();
    }
    // One Datum per image: pixels plus labelNum float_data entries.
    datum.ParseFromString(cursor_->value());
    read_time += timer.MicroSeconds();

    if (item_id == 0) {
      // Reshape according to the first datum of each batch
      // on single input batches allows for inputs of varying dimension.
      // Use data_transformer to infer the expected blob shape from datum.
      vector<int> top_shape = this->data_transformer_->InferBlobShape(datum);
      this->transformed_data_.Reshape(top_shape);
      // Reshape batch according to the batch_size.
      top_shape[0] = batch_size;
      batch->data_.Reshape(top_shape);
    }

    // Apply data transformations (mirror, scale, crop...)
    timer.Start();
    int offset = batch->data_.offset(item_id);
    Dtype* top_data = batch->data_.mutable_cpu_data();
    this->transformed_data_.set_cpu_data(top_data + offset);
    this->data_transformer_->Transform(datum, &(this->transformed_data_));

    // Copy the float regression labels.
    // Must stay in sync with the labelNum used in DataLayerSetUp().
    int labelNum = 4;
    if (this->output_labels_) {
      Dtype* top_label = batch->label_.mutable_cpu_data();
      for (int i = 0; i < labelNum; ++i) {
        // BUGFIX: each datum holds exactly labelNum floats for ONE image,
        // so the source index is simply i. The previous code read
        // float_data(i + labelNum * item_id), which indexes past the end of
        // the repeated field for every item after the first and only worked
        // with batch_size == 1.
        top_label[item_id * labelNum + i] = datum.float_data(i);
      }
    }
    trans_time += timer.MicroSeconds();
    Next();
  }
  timer.Stop();
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
INSTANTIATE_CLASS(DataLayer);
REGISTER_LAYER_CLASS(Data);
} // namespace caffe
修改项目清单
- 修改convert_imageset.cpp文件
- 修改io.hpp和io.cpp文件
- 修改data_layer.cpp文件
- 添加conv_dw_layer.cpp和conv_dw_layer.cu文件到/caffe_root/src/caffe/layers
- 添加conv_dw_layer.hpp文件到/caffe_root/include/caffe/layers
- 添加shuffle_channel_layer.cpp和shuffle_channel_layer.cu文件到/caffe_root/src/caffe/layers
- 添加shuffle_channel_layer.hpp文件到/caffe_root/include/caffe/layers
- 在caffe.proto中添加:
message LayerParameter {
...
optional ShuffleChannelParameter shuffle_channel_param = 164;
...
}
...
message ShuffleChannelParameter {
optional uint32 group = 1[default = 1]; // The number of group
}
生成lmdb数据
按下面格式制作文件:
图片名1 label1_1 label1_2 label1_3 label1_4
图片名2 label2_1 label2_2 label2_3 label2_4
图片名3 label3_1 label3_2 label3_3 label3_4
…
运行编译生成的convert_imageset可执行文件:
./convert_imageset --gray=false --shuffle=true --backend=lmdb --resize_width=0 --resize_height=0 --check_size=false --encoded=true --encode_type=jpg 图片根路径 上一步制作的文件 希望保存生成lmdb文件的路径
要注意在源码中,生成完整图片路径用的是简单的"+"。所以,图片根路径最后和制作的文件中图片名最前,有且仅有一个"/",否则会报错找不到图片
成功生成lmdb格式的数据:
Date层和Loss层参数说明
layer {
name:"data"
type:"Data"
top:"data"
top:"label"
include {
phase: TRAIN
}
transform_param {
mirror: true
# 随机镜像图片,默认是false:不随机镜像(注意:prototxt 的注释必须用 #,用 // 会导致解析失败)
crop_size: 0
# 随机切割图片到指定大小,默认是0:不切割
mean_file: "做减均值处理的均值文件,mean.binaryproto"
}
data_param {
source: "lmdb数据路径"
batch_size: 1
backend: LMDB
}
}
生成.binaryproto文件的方法:
用编译caffe生成的compute_image_mean可执行文件
./compute_image_mean --backend=lmdb lmdb文件的路径 输出文件.binaryproto
layer {
name: "loss"
type: "EuclideanLoss"
bottom: "fc4"
bottom: "label"
top: "loss"
}
Gconv和DWconv运算过程详解
Gconv(组卷积)和DWconv(深度可分离卷积)是ShuffleNet-V2的核心组件。这里用博主手绘图详解这两种卷积进行的操作:
train.prototxt样例
name: "shufflenet_v2"
layer {
name:"data"
type:"Data"
top:"data"
top:"label"
include {
phase: TRAIN
}
transform_param {
mirror: true
crop_size: 0
mean_file: "train.binaryproto"
}
data_param {
source: "lmdb"
batch_size: 1
backend: LMDB
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
convolution_param {
num_output: 24
pad: 1
kernel_size: 3
stride: 2
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "conv1_bn"
type: "BatchNorm"
bottom: "conv1"
top: "conv1"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "conv1_scale"
bottom: "conv1"
top: "conv1"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "conv1_relu"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "branch1_1_conv1"
type: "ConvolutionDepthwise"
bottom: "pool1"
top: "branch1_1_conv1"
convolution_param {
num_output: 24
kernel_size: 3
stride: 2
pad: 1
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch1_1_conv1_bn"
type: "BatchNorm"
bottom: "branch1_1_conv1"
top: "branch1_1_conv1"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch1_1_conv1_scale"
bottom: "branch1_1_conv1"
top: "branch1_1_conv1"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch1_1_conv2"
type: "Convolution"
bottom: "branch1_1_conv1"
top: "branch1_1_conv2"
convolution_param {
num_output: 58
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch1_1_conv2_bn"
type: "BatchNorm"
bottom: "branch1_1_conv2"
top: "branch1_1_conv2"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch1_1_conv2_scale"
bottom: "branch1_1_conv2"
top: "branch1_1_conv2"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch1_1_conv2_relu"
type: "ReLU"
bottom: "branch1_1_conv2"
top: "branch1_1_conv2"
}
layer {
name: "branch1_2_conv1"
type: "Convolution"
bottom: "pool1"
top: "branch1_2_conv1"
convolution_param {
num_output: 58
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch1_2_conv1_bn"
type: "BatchNorm"
bottom: "branch1_2_conv1"
top: "branch1_2_conv1"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch1_2_conv1_scale"
bottom: "branch1_2_conv1"
top: "branch1_2_conv1"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch1_2_conv1_relu"
type: "ReLU"
bottom: "branch1_2_conv1"
top: "branch1_2_conv1"
}
layer {
name: "branch1_2_conv2"
type: "ConvolutionDepthwise"
bottom: "branch1_2_conv1"
top: "branch1_2_conv2"
convolution_param {
num_output: 58
kernel_size: 3
stride: 2
pad: 1
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch1_2_conv2_bn"
type: "BatchNorm"
bottom: "branch1_2_conv2"
top: "branch1_2_conv2"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch1_2_conv2_scale"
bottom: "branch1_2_conv2"
top: "branch1_2_conv2"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch1_2_conv3"
type: "Convolution"
bottom: "branch1_2_conv2"
top: "branch1_2_conv3"
convolution_param {
num_output: 58
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch1_2_conv3_bn"
type: "BatchNorm"
bottom: "branch1_2_conv3"
top: "branch1_2_conv3"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch1_2_conv3_scale"
bottom: "branch1_2_conv3"
top: "branch1_2_conv3"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch1_2_conv3_relu"
type: "ReLU"
bottom: "branch1_2_conv3"
top: "branch1_2_conv3"
}
layer {
name: "concat1"
type: "Concat"
bottom: "branch1_1_conv2"
bottom: "branch1_2_conv3"
top: "concat1"
}
layer {
name: "shuffle1"
type: "ShuffleChannel"
bottom: "concat1"
top: "shuffle1"
shuffle_channel_param {
group: 2
}
}
# ShuffleNet-V2 basic unit (stride 1): split the 116-channel "shuffle1" into
# two 58-channel halves; the left half (branch2_1) is an identity shortcut,
# the right half runs 1x1 conv -> 3x3 depthwise conv -> 1x1 conv, each with
# BN + Scale (BN blobs frozen via lr_mult 0; ReLU only after the 1x1 convs).
layer {
name: "slice2"
type: "Slice"
bottom: "shuffle1"
top: "branch2_1"
top: "branch2_2"
slice_param {
slice_point: 58
axis: 1
}
}
layer {
name: "branch2_2_conv1"
type: "Convolution"
bottom: "branch2_2"
top: "branch2_2_conv1"
convolution_param {
num_output: 58
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch2_2_conv1_bn"
type: "BatchNorm"
bottom: "branch2_2_conv1"
top: "branch2_2_conv1"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch2_2_conv1_scale"
bottom: "branch2_2_conv1"
top: "branch2_2_conv1"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch2_2_conv1_relu"
type: "ReLU"
bottom: "branch2_2_conv1"
top: "branch2_2_conv1"
}
# 3x3 depthwise conv; note there is no ReLU after this stage (BN+Scale only).
layer {
name: "branch2_2_conv2"
type: "ConvolutionDepthwise"
bottom: "branch2_2_conv1"
top: "branch2_2_conv2"
convolution_param {
num_output: 58
kernel_size: 3
stride: 1
pad: 1
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch2_2_conv2_bn"
type: "BatchNorm"
bottom: "branch2_2_conv2"
top: "branch2_2_conv2"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch2_2_conv2_scale"
bottom: "branch2_2_conv2"
top: "branch2_2_conv2"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch2_2_conv3"
type: "Convolution"
bottom: "branch2_2_conv2"
top: "branch2_2_conv3"
convolution_param {
num_output: 58
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch2_2_conv3_bn"
type: "BatchNorm"
bottom: "branch2_2_conv3"
top: "branch2_2_conv3"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch2_2_conv3_scale"
bottom: "branch2_2_conv3"
top: "branch2_2_conv3"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch2_2_conv3_relu"
type: "ReLU"
bottom: "branch2_2_conv3"
top: "branch2_2_conv3"
}
# Re-join the halves and shuffle channels across the 2 groups so information
# mixes between branches in the next unit.
layer {
name: "concat2"
type: "Concat"
bottom: "branch2_1"
bottom: "branch2_2_conv3"
top: "concat2"
}
layer {
name: "shuffle2"
type: "ShuffleChannel"
bottom: "concat2"
top: "shuffle2"
shuffle_channel_param {
group: 2
}
}
# ShuffleNet-V2 basic unit (stride 1): split the 116-channel "shuffle2" into
# two 58-channel halves; branch3_1 is an identity shortcut, branch3_2 runs
# 1x1 conv -> 3x3 depthwise conv -> 1x1 conv, each with BN + Scale
# (BN blobs frozen via lr_mult 0; ReLU only after the 1x1 convs).
layer {
name: "slice3"
type: "Slice"
bottom: "shuffle2"
top: "branch3_1"
top: "branch3_2"
slice_param {
slice_point: 58
axis: 1
}
}
layer {
name: "branch3_2_conv1"
type: "Convolution"
bottom: "branch3_2"
top: "branch3_2_conv1"
convolution_param {
num_output: 58
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch3_2_conv1_bn"
type: "BatchNorm"
bottom: "branch3_2_conv1"
top: "branch3_2_conv1"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch3_2_conv1_scale"
bottom: "branch3_2_conv1"
top: "branch3_2_conv1"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch3_2_conv1_relu"
type: "ReLU"
bottom: "branch3_2_conv1"
top: "branch3_2_conv1"
}
# 3x3 depthwise conv; no ReLU after this stage (BN+Scale only).
layer {
name: "branch3_2_conv2"
type: "ConvolutionDepthwise"
bottom: "branch3_2_conv1"
top: "branch3_2_conv2"
convolution_param {
num_output: 58
kernel_size: 3
stride: 1
pad: 1
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch3_2_conv2_bn"
type: "BatchNorm"
bottom: "branch3_2_conv2"
top: "branch3_2_conv2"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch3_2_conv2_scale"
bottom: "branch3_2_conv2"
top: "branch3_2_conv2"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch3_2_conv3"
type: "Convolution"
bottom: "branch3_2_conv2"
top: "branch3_2_conv3"
convolution_param {
num_output: 58
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch3_2_conv3_bn"
type: "BatchNorm"
bottom: "branch3_2_conv3"
top: "branch3_2_conv3"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch3_2_conv3_scale"
bottom: "branch3_2_conv3"
top: "branch3_2_conv3"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch3_2_conv3_relu"
type: "ReLU"
bottom: "branch3_2_conv3"
top: "branch3_2_conv3"
}
# Re-join the halves and shuffle channels across the 2 groups.
layer {
name: "concat3"
type: "Concat"
bottom: "branch3_1"
bottom: "branch3_2_conv3"
top: "concat3"
}
layer {
name: "shuffle3"
type: "ShuffleChannel"
bottom: "concat3"
top: "shuffle3"
shuffle_channel_param {
group: 2
}
}
# ShuffleNet-V2 basic unit (stride 1): split the 116-channel "shuffle3" into
# two 58-channel halves; branch4_1 is an identity shortcut, branch4_2 runs
# 1x1 conv -> 3x3 depthwise conv -> 1x1 conv, each with BN + Scale
# (BN blobs frozen via lr_mult 0; ReLU only after the 1x1 convs).
layer {
name: "slice4"
type: "Slice"
bottom: "shuffle3"
top: "branch4_1"
top: "branch4_2"
slice_param {
slice_point: 58
axis: 1
}
}
layer {
name: "branch4_2_conv1"
type: "Convolution"
bottom: "branch4_2"
top: "branch4_2_conv1"
convolution_param {
num_output: 58
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch4_2_conv1_bn"
type: "BatchNorm"
bottom: "branch4_2_conv1"
top: "branch4_2_conv1"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch4_2_conv1_scale"
bottom: "branch4_2_conv1"
top: "branch4_2_conv1"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch4_2_conv1_relu"
type: "ReLU"
bottom: "branch4_2_conv1"
top: "branch4_2_conv1"
}
# 3x3 depthwise conv; no ReLU after this stage (BN+Scale only).
layer {
name: "branch4_2_conv2"
type: "ConvolutionDepthwise"
bottom: "branch4_2_conv1"
top: "branch4_2_conv2"
convolution_param {
num_output: 58
kernel_size: 3
stride: 1
pad: 1
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch4_2_conv2_bn"
type: "BatchNorm"
bottom: "branch4_2_conv2"
top: "branch4_2_conv2"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch4_2_conv2_scale"
bottom: "branch4_2_conv2"
top: "branch4_2_conv2"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch4_2_conv3"
type: "Convolution"
bottom: "branch4_2_conv2"
top: "branch4_2_conv3"
convolution_param {
num_output: 58
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch4_2_conv3_bn"
type: "BatchNorm"
bottom: "branch4_2_conv3"
top: "branch4_2_conv3"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch4_2_conv3_scale"
bottom: "branch4_2_conv3"
top: "branch4_2_conv3"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch4_2_conv3_relu"
type: "ReLU"
bottom: "branch4_2_conv3"
top: "branch4_2_conv3"
}
# Re-join the halves and shuffle channels across the 2 groups.
layer {
name: "concat4"
type: "Concat"
bottom: "branch4_1"
bottom: "branch4_2_conv3"
top: "concat4"
}
layer {
name: "shuffle4"
type: "ShuffleChannel"
bottom: "concat4"
top: "shuffle4"
shuffle_channel_param {
group: 2
}
}
# ShuffleNet-V2 downsampling unit (stride 2): no channel split here — both
# branches consume the full 116-channel "shuffle4".
# branch5_1: 3x3 depthwise conv (stride 2) -> 1x1 conv.
# branch5_2: 1x1 conv -> 3x3 depthwise conv (stride 2) -> 1x1 conv.
# Concat doubles the channels (116 + 116 = 232), then channel shuffle.
layer {
name: "branch5_1_conv1"
type: "ConvolutionDepthwise"
bottom: "shuffle4"
top: "branch5_1_conv1"
convolution_param {
num_output: 116
kernel_size: 3
stride: 2
pad: 1
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch5_1_conv1_bn"
type: "BatchNorm"
bottom: "branch5_1_conv1"
top: "branch5_1_conv1"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
# No ReLU after the depthwise stage (BN+Scale only).
layer {
name: "branch5_1_conv1_scale"
bottom: "branch5_1_conv1"
top: "branch5_1_conv1"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch5_1_conv2"
type: "Convolution"
bottom: "branch5_1_conv1"
top: "branch5_1_conv2"
convolution_param {
num_output: 116
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch5_1_conv2_bn"
type: "BatchNorm"
bottom: "branch5_1_conv2"
top: "branch5_1_conv2"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch5_1_conv2_scale"
bottom: "branch5_1_conv2"
top: "branch5_1_conv2"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch5_1_conv2_relu"
type: "ReLU"
bottom: "branch5_1_conv2"
top: "branch5_1_conv2"
}
# Second branch also starts from the full "shuffle4" input.
layer {
name: "branch5_2_conv1"
type: "Convolution"
bottom: "shuffle4"
top: "branch5_2_conv1"
convolution_param {
num_output: 116
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch5_2_conv1_bn"
type: "BatchNorm"
bottom: "branch5_2_conv1"
top: "branch5_2_conv1"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch5_2_conv1_scale"
bottom: "branch5_2_conv1"
top: "branch5_2_conv1"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch5_2_conv1_relu"
type: "ReLU"
bottom: "branch5_2_conv1"
top: "branch5_2_conv1"
}
# Spatial downsampling happens in this stride-2 depthwise conv.
layer {
name: "branch5_2_conv2"
type: "ConvolutionDepthwise"
bottom: "branch5_2_conv1"
top: "branch5_2_conv2"
convolution_param {
num_output: 116
kernel_size: 3
stride: 2
pad: 1
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch5_2_conv2_bn"
type: "BatchNorm"
bottom: "branch5_2_conv2"
top: "branch5_2_conv2"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch5_2_conv2_scale"
bottom: "branch5_2_conv2"
top: "branch5_2_conv2"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch5_2_conv3"
type: "Convolution"
bottom: "branch5_2_conv2"
top: "branch5_2_conv3"
convolution_param {
num_output: 116
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch5_2_conv3_bn"
type: "BatchNorm"
bottom: "branch5_2_conv3"
top: "branch5_2_conv3"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch5_2_conv3_scale"
bottom: "branch5_2_conv3"
top: "branch5_2_conv3"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch5_2_conv3_relu"
type: "ReLU"
bottom: "branch5_2_conv3"
top: "branch5_2_conv3"
}
# Concatenate the two 116-channel branch outputs (232 total) and shuffle.
layer {
name: "concat5"
type: "Concat"
bottom: "branch5_1_conv2"
bottom: "branch5_2_conv3"
top: "concat5"
}
layer {
name: "shuffle5"
type: "ShuffleChannel"
bottom: "concat5"
top: "shuffle5"
shuffle_channel_param {
group: 2
}
}
# ShuffleNet-V2 basic unit (stride 1): split the 232-channel "shuffle5" into
# two 116-channel halves; branch6_1 is an identity shortcut, branch6_2 runs
# 1x1 conv -> 3x3 depthwise conv -> 1x1 conv, each with BN + Scale
# (BN blobs frozen via lr_mult 0; ReLU only after the 1x1 convs).
layer {
name: "slice6"
type: "Slice"
bottom: "shuffle5"
top: "branch6_1"
top: "branch6_2"
slice_param {
slice_point: 116
axis: 1
}
}
layer {
name: "branch6_2_conv1"
type: "Convolution"
bottom: "branch6_2"
top: "branch6_2_conv1"
convolution_param {
num_output: 116
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch6_2_conv1_bn"
type: "BatchNorm"
bottom: "branch6_2_conv1"
top: "branch6_2_conv1"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch6_2_conv1_scale"
bottom: "branch6_2_conv1"
top: "branch6_2_conv1"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch6_2_conv1_relu"
type: "ReLU"
bottom: "branch6_2_conv1"
top: "branch6_2_conv1"
}
# 3x3 depthwise conv; no ReLU after this stage (BN+Scale only).
layer {
name: "branch6_2_conv2"
type: "ConvolutionDepthwise"
bottom: "branch6_2_conv1"
top: "branch6_2_conv2"
convolution_param {
num_output: 116
kernel_size: 3
stride: 1
pad: 1
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch6_2_conv2_bn"
type: "BatchNorm"
bottom: "branch6_2_conv2"
top: "branch6_2_conv2"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch6_2_conv2_scale"
bottom: "branch6_2_conv2"
top: "branch6_2_conv2"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch6_2_conv3"
type: "Convolution"
bottom: "branch6_2_conv2"
top: "branch6_2_conv3"
convolution_param {
num_output: 116
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch6_2_conv3_bn"
type: "BatchNorm"
bottom: "branch6_2_conv3"
top: "branch6_2_conv3"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch6_2_conv3_scale"
bottom: "branch6_2_conv3"
top: "branch6_2_conv3"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch6_2_conv3_relu"
type: "ReLU"
bottom: "branch6_2_conv3"
top: "branch6_2_conv3"
}
# Re-join the halves and shuffle channels across the 2 groups.
layer {
name: "concat6"
type: "Concat"
bottom: "branch6_1"
bottom: "branch6_2_conv3"
top: "concat6"
}
layer {
name: "shuffle6"
type: "ShuffleChannel"
bottom: "concat6"
top: "shuffle6"
shuffle_channel_param {
group: 2
}
}
# ShuffleNet-V2 basic unit (stride 1): split the 232-channel "shuffle6" into
# two 116-channel halves; branch7_1 is an identity shortcut, branch7_2 runs
# 1x1 conv -> 3x3 depthwise conv -> 1x1 conv, each with BN + Scale
# (BN blobs frozen via lr_mult 0; ReLU only after the 1x1 convs).
layer {
name: "slice7"
type: "Slice"
bottom: "shuffle6"
top: "branch7_1"
top: "branch7_2"
slice_param {
slice_point: 116
axis: 1
}
}
layer {
name: "branch7_2_conv1"
type: "Convolution"
bottom: "branch7_2"
top: "branch7_2_conv1"
convolution_param {
num_output: 116
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch7_2_conv1_bn"
type: "BatchNorm"
bottom: "branch7_2_conv1"
top: "branch7_2_conv1"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch7_2_conv1_scale"
bottom: "branch7_2_conv1"
top: "branch7_2_conv1"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch7_2_conv1_relu"
type: "ReLU"
bottom: "branch7_2_conv1"
top: "branch7_2_conv1"
}
# 3x3 depthwise conv; no ReLU after this stage (BN+Scale only).
layer {
name: "branch7_2_conv2"
type: "ConvolutionDepthwise"
bottom: "branch7_2_conv1"
top: "branch7_2_conv2"
convolution_param {
num_output: 116
kernel_size: 3
stride: 1
pad: 1
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch7_2_conv2_bn"
type: "BatchNorm"
bottom: "branch7_2_conv2"
top: "branch7_2_conv2"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch7_2_conv2_scale"
bottom: "branch7_2_conv2"
top: "branch7_2_conv2"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch7_2_conv3"
type: "Convolution"
bottom: "branch7_2_conv2"
top: "branch7_2_conv3"
convolution_param {
num_output: 116
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch7_2_conv3_bn"
type: "BatchNorm"
bottom: "branch7_2_conv3"
top: "branch7_2_conv3"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch7_2_conv3_scale"
bottom: "branch7_2_conv3"
top: "branch7_2_conv3"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch7_2_conv3_relu"
type: "ReLU"
bottom: "branch7_2_conv3"
top: "branch7_2_conv3"
}
# Re-join the halves and shuffle channels across the 2 groups.
layer {
name: "concat7"
type: "Concat"
bottom: "branch7_1"
bottom: "branch7_2_conv3"
top: "concat7"
}
layer {
name: "shuffle7"
type: "ShuffleChannel"
bottom: "concat7"
top: "shuffle7"
shuffle_channel_param {
group: 2
}
}
# ShuffleNet-V2 basic unit (stride 1): split the 232-channel "shuffle7" into
# two 116-channel halves; branch8_1 is an identity shortcut, branch8_2 runs
# 1x1 conv -> 3x3 depthwise conv -> 1x1 conv, each with BN + Scale
# (BN blobs frozen via lr_mult 0; ReLU only after the 1x1 convs).
layer {
name: "slice8"
type: "Slice"
bottom: "shuffle7"
top: "branch8_1"
top: "branch8_2"
slice_param {
slice_point: 116
axis: 1
}
}
layer {
name: "branch8_2_conv1"
type: "Convolution"
bottom: "branch8_2"
top: "branch8_2_conv1"
convolution_param {
num_output: 116
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch8_2_conv1_bn"
type: "BatchNorm"
bottom: "branch8_2_conv1"
top: "branch8_2_conv1"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch8_2_conv1_scale"
bottom: "branch8_2_conv1"
top: "branch8_2_conv1"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch8_2_conv1_relu"
type: "ReLU"
bottom: "branch8_2_conv1"
top: "branch8_2_conv1"
}
# 3x3 depthwise conv; no ReLU after this stage (BN+Scale only).
layer {
name: "branch8_2_conv2"
type: "ConvolutionDepthwise"
bottom: "branch8_2_conv1"
top: "branch8_2_conv2"
convolution_param {
num_output: 116
kernel_size: 3
stride: 1
pad: 1
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch8_2_conv2_bn"
type: "BatchNorm"
bottom: "branch8_2_conv2"
top: "branch8_2_conv2"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch8_2_conv2_scale"
bottom: "branch8_2_conv2"
top: "branch8_2_conv2"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch8_2_conv3"
type: "Convolution"
bottom: "branch8_2_conv2"
top: "branch8_2_conv3"
convolution_param {
num_output: 116
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch8_2_conv3_bn"
type: "BatchNorm"
bottom: "branch8_2_conv3"
top: "branch8_2_conv3"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch8_2_conv3_scale"
bottom: "branch8_2_conv3"
top: "branch8_2_conv3"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch8_2_conv3_relu"
type: "ReLU"
bottom: "branch8_2_conv3"
top: "branch8_2_conv3"
}
# Re-join the halves and shuffle channels across the 2 groups.
layer {
name: "concat8"
type: "Concat"
bottom: "branch8_1"
bottom: "branch8_2_conv3"
top: "concat8"
}
layer {
name: "shuffle8"
type: "ShuffleChannel"
bottom: "concat8"
top: "shuffle8"
shuffle_channel_param {
group: 2
}
}
# ShuffleNet-V2 basic unit (stride 1): split the 232-channel "shuffle8" into
# two 116-channel halves; branch9_1 is an identity shortcut, branch9_2 runs
# 1x1 conv -> 3x3 depthwise conv -> 1x1 conv, each with BN + Scale
# (BN blobs frozen via lr_mult 0; ReLU only after the 1x1 convs).
layer {
name: "slice9"
type: "Slice"
bottom: "shuffle8"
top: "branch9_1"
top: "branch9_2"
slice_param {
slice_point: 116
axis: 1
}
}
layer {
name: "branch9_2_conv1"
type: "Convolution"
bottom: "branch9_2"
top: "branch9_2_conv1"
convolution_param {
num_output: 116
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch9_2_conv1_bn"
type: "BatchNorm"
bottom: "branch9_2_conv1"
top: "branch9_2_conv1"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch9_2_conv1_scale"
bottom: "branch9_2_conv1"
top: "branch9_2_conv1"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch9_2_conv1_relu"
type: "ReLU"
bottom: "branch9_2_conv1"
top: "branch9_2_conv1"
}
# 3x3 depthwise conv; no ReLU after this stage (BN+Scale only).
layer {
name: "branch9_2_conv2"
type: "ConvolutionDepthwise"
bottom: "branch9_2_conv1"
top: "branch9_2_conv2"
convolution_param {
num_output: 116
kernel_size: 3
stride: 1
pad: 1
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch9_2_conv2_bn"
type: "BatchNorm"
bottom: "branch9_2_conv2"
top: "branch9_2_conv2"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch9_2_conv2_scale"
bottom: "branch9_2_conv2"
top: "branch9_2_conv2"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch9_2_conv3"
type: "Convolution"
bottom: "branch9_2_conv2"
top: "branch9_2_conv3"
convolution_param {
num_output: 116
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch9_2_conv3_bn"
type: "BatchNorm"
bottom: "branch9_2_conv3"
top: "branch9_2_conv3"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch9_2_conv3_scale"
bottom: "branch9_2_conv3"
top: "branch9_2_conv3"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch9_2_conv3_relu"
type: "ReLU"
bottom: "branch9_2_conv3"
top: "branch9_2_conv3"
}
# Re-join the halves and shuffle channels across the 2 groups.
layer {
name: "concat9"
type: "Concat"
bottom: "branch9_1"
bottom: "branch9_2_conv3"
top: "concat9"
}
layer {
name: "shuffle9"
type: "ShuffleChannel"
bottom: "concat9"
top: "shuffle9"
shuffle_channel_param {
group: 2
}
}
# ShuffleNet-V2 basic unit (stride 1): split the 232-channel "shuffle9" into
# two 116-channel halves; branch10_1 is an identity shortcut, branch10_2 runs
# 1x1 conv -> 3x3 depthwise conv -> 1x1 conv, each with BN + Scale
# (BN blobs frozen via lr_mult 0; ReLU only after the 1x1 convs).
layer {
name: "slice10"
type: "Slice"
bottom: "shuffle9"
top: "branch10_1"
top: "branch10_2"
slice_param {
slice_point: 116
axis: 1
}
}
layer {
name: "branch10_2_conv1"
type: "Convolution"
bottom: "branch10_2"
top: "branch10_2_conv1"
convolution_param {
num_output: 116
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch10_2_conv1_bn"
type: "BatchNorm"
bottom: "branch10_2_conv1"
top: "branch10_2_conv1"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch10_2_conv1_scale"
bottom: "branch10_2_conv1"
top: "branch10_2_conv1"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch10_2_conv1_relu"
type: "ReLU"
bottom: "branch10_2_conv1"
top: "branch10_2_conv1"
}
# 3x3 depthwise conv; no ReLU after this stage (BN+Scale only).
layer {
name: "branch10_2_conv2"
type: "ConvolutionDepthwise"
bottom: "branch10_2_conv1"
top: "branch10_2_conv2"
convolution_param {
num_output: 116
kernel_size: 3
stride: 1
pad: 1
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch10_2_conv2_bn"
type: "BatchNorm"
bottom: "branch10_2_conv2"
top: "branch10_2_conv2"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch10_2_conv2_scale"
bottom: "branch10_2_conv2"
top: "branch10_2_conv2"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch10_2_conv3"
type: "Convolution"
bottom: "branch10_2_conv2"
top: "branch10_2_conv3"
convolution_param {
num_output: 116
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch10_2_conv3_bn"
type: "BatchNorm"
bottom: "branch10_2_conv3"
top: "branch10_2_conv3"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch10_2_conv3_scale"
bottom: "branch10_2_conv3"
top: "branch10_2_conv3"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch10_2_conv3_relu"
type: "ReLU"
bottom: "branch10_2_conv3"
top: "branch10_2_conv3"
}
# Re-join the halves and shuffle channels across the 2 groups.
layer {
name: "concat10"
type: "Concat"
bottom: "branch10_1"
bottom: "branch10_2_conv3"
top: "concat10"
}
layer {
name: "shuffle10"
type: "ShuffleChannel"
bottom: "concat10"
top: "shuffle10"
shuffle_channel_param {
group: 2
}
}
# ShuffleNet-V2 basic unit (stride 1): split the 232-channel "shuffle10" into
# two 116-channel halves; branch11_1 is an identity shortcut, branch11_2 runs
# 1x1 conv -> 3x3 depthwise conv -> 1x1 conv, each with BN + Scale
# (BN blobs frozen via lr_mult 0; ReLU only after the 1x1 convs).
layer {
name: "slice11"
type: "Slice"
bottom: "shuffle10"
top: "branch11_1"
top: "branch11_2"
slice_param {
slice_point: 116
axis: 1
}
}
layer {
name: "branch11_2_conv1"
type: "Convolution"
bottom: "branch11_2"
top: "branch11_2_conv1"
convolution_param {
num_output: 116
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch11_2_conv1_bn"
type: "BatchNorm"
bottom: "branch11_2_conv1"
top: "branch11_2_conv1"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch11_2_conv1_scale"
bottom: "branch11_2_conv1"
top: "branch11_2_conv1"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch11_2_conv1_relu"
type: "ReLU"
bottom: "branch11_2_conv1"
top: "branch11_2_conv1"
}
# 3x3 depthwise conv; no ReLU after this stage (BN+Scale only).
layer {
name: "branch11_2_conv2"
type: "ConvolutionDepthwise"
bottom: "branch11_2_conv1"
top: "branch11_2_conv2"
convolution_param {
num_output: 116
kernel_size: 3
stride: 1
pad: 1
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch11_2_conv2_bn"
type: "BatchNorm"
bottom: "branch11_2_conv2"
top: "branch11_2_conv2"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch11_2_conv2_scale"
bottom: "branch11_2_conv2"
top: "branch11_2_conv2"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch11_2_conv3"
type: "Convolution"
bottom: "branch11_2_conv2"
top: "branch11_2_conv3"
convolution_param {
num_output: 116
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch11_2_conv3_bn"
type: "BatchNorm"
bottom: "branch11_2_conv3"
top: "branch11_2_conv3"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch11_2_conv3_scale"
bottom: "branch11_2_conv3"
top: "branch11_2_conv3"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch11_2_conv3_relu"
type: "ReLU"
bottom: "branch11_2_conv3"
top: "branch11_2_conv3"
}
# Re-join the halves and shuffle channels across the 2 groups.
layer {
name: "concat11"
type: "Concat"
bottom: "branch11_1"
bottom: "branch11_2_conv3"
top: "concat11"
}
layer {
name: "shuffle11"
type: "ShuffleChannel"
bottom: "concat11"
top: "shuffle11"
shuffle_channel_param {
group: 2
}
}
# ShuffleNet-V2 basic unit (stride 1): split the 232-channel "shuffle11" into
# two 116-channel halves; branch12_1 is an identity shortcut, branch12_2 runs
# 1x1 conv -> 3x3 depthwise conv -> 1x1 conv, each with BN + Scale
# (BN blobs frozen via lr_mult 0; ReLU only after the 1x1 convs).
layer {
name: "slice12"
type: "Slice"
bottom: "shuffle11"
top: "branch12_1"
top: "branch12_2"
slice_param {
slice_point: 116
axis: 1
}
}
layer {
name: "branch12_2_conv1"
type: "Convolution"
bottom: "branch12_2"
top: "branch12_2_conv1"
convolution_param {
num_output: 116
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch12_2_conv1_bn"
type: "BatchNorm"
bottom: "branch12_2_conv1"
top: "branch12_2_conv1"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch12_2_conv1_scale"
bottom: "branch12_2_conv1"
top: "branch12_2_conv1"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch12_2_conv1_relu"
type: "ReLU"
bottom: "branch12_2_conv1"
top: "branch12_2_conv1"
}
# 3x3 depthwise conv; no ReLU after this stage (BN+Scale only).
layer {
name: "branch12_2_conv2"
type: "ConvolutionDepthwise"
bottom: "branch12_2_conv1"
top: "branch12_2_conv2"
convolution_param {
num_output: 116
kernel_size: 3
stride: 1
pad: 1
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch12_2_conv2_bn"
type: "BatchNorm"
bottom: "branch12_2_conv2"
top: "branch12_2_conv2"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch12_2_conv2_scale"
bottom: "branch12_2_conv2"
top: "branch12_2_conv2"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch12_2_conv3"
type: "Convolution"
bottom: "branch12_2_conv2"
top: "branch12_2_conv3"
convolution_param {
num_output: 116
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch12_2_conv3_bn"
type: "BatchNorm"
bottom: "branch12_2_conv3"
top: "branch12_2_conv3"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch12_2_conv3_scale"
bottom: "branch12_2_conv3"
top: "branch12_2_conv3"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch12_2_conv3_relu"
type: "ReLU"
bottom: "branch12_2_conv3"
top: "branch12_2_conv3"
}
# Re-join the halves and shuffle channels across the 2 groups.
layer {
name: "concat12"
type: "Concat"
bottom: "branch12_1"
bottom: "branch12_2_conv3"
top: "concat12"
}
layer {
name: "shuffle12"
type: "ShuffleChannel"
bottom: "concat12"
top: "shuffle12"
shuffle_channel_param {
group: 2
}
}
# ShuffleNet-V2 downsampling unit (stride 2): no channel split here — both
# branches consume the full 232-channel "shuffle12".
# branch13_1: 3x3 depthwise conv (stride 2) -> 1x1 conv.
# branch13_2: 1x1 conv -> 3x3 depthwise conv (stride 2) -> 1x1 conv.
# Concat doubles the channels (232 + 232 = 464), then channel shuffle.
layer {
name: "branch13_1_conv1"
type: "ConvolutionDepthwise"
bottom: "shuffle12"
top: "branch13_1_conv1"
convolution_param {
num_output: 232
kernel_size: 3
stride: 2
pad: 1
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch13_1_conv1_bn"
type: "BatchNorm"
bottom: "branch13_1_conv1"
top: "branch13_1_conv1"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
# No ReLU after the depthwise stage (BN+Scale only).
layer {
name: "branch13_1_conv1_scale"
bottom: "branch13_1_conv1"
top: "branch13_1_conv1"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch13_1_conv2"
type: "Convolution"
bottom: "branch13_1_conv1"
top: "branch13_1_conv2"
convolution_param {
num_output: 232
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch13_1_conv2_bn"
type: "BatchNorm"
bottom: "branch13_1_conv2"
top: "branch13_1_conv2"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch13_1_conv2_scale"
bottom: "branch13_1_conv2"
top: "branch13_1_conv2"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch13_1_conv2_relu"
type: "ReLU"
bottom: "branch13_1_conv2"
top: "branch13_1_conv2"
}
# Second branch also starts from the full "shuffle12" input.
layer {
name: "branch13_2_conv1"
type: "Convolution"
bottom: "shuffle12"
top: "branch13_2_conv1"
convolution_param {
num_output: 232
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch13_2_conv1_bn"
type: "BatchNorm"
bottom: "branch13_2_conv1"
top: "branch13_2_conv1"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch13_2_conv1_scale"
bottom: "branch13_2_conv1"
top: "branch13_2_conv1"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch13_2_conv1_relu"
type: "ReLU"
bottom: "branch13_2_conv1"
top: "branch13_2_conv1"
}
# Spatial downsampling happens in this stride-2 depthwise conv.
layer {
name: "branch13_2_conv2"
type: "ConvolutionDepthwise"
bottom: "branch13_2_conv1"
top: "branch13_2_conv2"
convolution_param {
num_output: 232
kernel_size: 3
stride: 2
pad: 1
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch13_2_conv2_bn"
type: "BatchNorm"
bottom: "branch13_2_conv2"
top: "branch13_2_conv2"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch13_2_conv2_scale"
bottom: "branch13_2_conv2"
top: "branch13_2_conv2"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch13_2_conv3"
type: "Convolution"
bottom: "branch13_2_conv2"
top: "branch13_2_conv3"
convolution_param {
num_output: 232
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch13_2_conv3_bn"
type: "BatchNorm"
bottom: "branch13_2_conv3"
top: "branch13_2_conv3"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch13_2_conv3_scale"
bottom: "branch13_2_conv3"
top: "branch13_2_conv3"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch13_2_conv3_relu"
type: "ReLU"
bottom: "branch13_2_conv3"
top: "branch13_2_conv3"
}
# Concatenate the two 232-channel branch outputs (464 total) and shuffle.
layer {
name: "concat13"
type: "Concat"
bottom: "branch13_1_conv2"
bottom: "branch13_2_conv3"
top: "concat13"
}
layer {
name: "shuffle13"
type: "ShuffleChannel"
bottom: "concat13"
top: "shuffle13"
shuffle_channel_param {
group: 2
}
}
# ShuffleNet-V2 basic unit (stride 1): split the 464-channel "shuffle13" into
# two 232-channel halves; branch14_1 is an identity shortcut, branch14_2 runs
# 1x1 conv -> 3x3 depthwise conv -> 1x1 conv, each with BN + Scale
# (BN blobs frozen via lr_mult 0; ReLU only after the 1x1 convs).
layer {
name: "slice14"
type: "Slice"
bottom: "shuffle13"
top: "branch14_1"
top: "branch14_2"
slice_param {
slice_point: 232
axis: 1
}
}
layer {
name: "branch14_2_conv1"
type: "Convolution"
bottom: "branch14_2"
top: "branch14_2_conv1"
convolution_param {
num_output: 232
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch14_2_conv1_bn"
type: "BatchNorm"
bottom: "branch14_2_conv1"
top: "branch14_2_conv1"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch14_2_conv1_scale"
bottom: "branch14_2_conv1"
top: "branch14_2_conv1"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch14_2_conv1_relu"
type: "ReLU"
bottom: "branch14_2_conv1"
top: "branch14_2_conv1"
}
# 3x3 depthwise conv; no ReLU after this stage (BN+Scale only).
layer {
name: "branch14_2_conv2"
type: "ConvolutionDepthwise"
bottom: "branch14_2_conv1"
top: "branch14_2_conv2"
convolution_param {
num_output: 232
kernel_size: 3
stride: 1
pad: 1
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch14_2_conv2_bn"
type: "BatchNorm"
bottom: "branch14_2_conv2"
top: "branch14_2_conv2"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch14_2_conv2_scale"
bottom: "branch14_2_conv2"
top: "branch14_2_conv2"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch14_2_conv3"
type: "Convolution"
bottom: "branch14_2_conv2"
top: "branch14_2_conv3"
convolution_param {
num_output: 232
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch14_2_conv3_bn"
type: "BatchNorm"
bottom: "branch14_2_conv3"
top: "branch14_2_conv3"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch14_2_conv3_scale"
bottom: "branch14_2_conv3"
top: "branch14_2_conv3"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch14_2_conv3_relu"
type: "ReLU"
bottom: "branch14_2_conv3"
top: "branch14_2_conv3"
}
# Re-join the halves and shuffle channels across the 2 groups.
layer {
name: "concat14"
type: "Concat"
bottom: "branch14_1"
bottom: "branch14_2_conv3"
top: "concat14"
}
layer {
name: "shuffle14"
type: "ShuffleChannel"
bottom: "concat14"
top: "shuffle14"
shuffle_channel_param {
group: 2
}
}
# ShuffleNet-V2 basic unit (stride 1): split the 464-channel "shuffle14" into
# two 232-channel halves; branch15_1 is an identity shortcut, branch15_2 runs
# 1x1 conv -> 3x3 depthwise conv -> 1x1 conv, each with BN + Scale
# (BN blobs frozen via lr_mult 0; ReLU only after the 1x1 convs).
layer {
name: "slice15"
type: "Slice"
bottom: "shuffle14"
top: "branch15_1"
top: "branch15_2"
slice_param {
slice_point: 232
axis: 1
}
}
layer {
name: "branch15_2_conv1"
type: "Convolution"
bottom: "branch15_2"
top: "branch15_2_conv1"
convolution_param {
num_output: 232
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch15_2_conv1_bn"
type: "BatchNorm"
bottom: "branch15_2_conv1"
top: "branch15_2_conv1"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch15_2_conv1_scale"
bottom: "branch15_2_conv1"
top: "branch15_2_conv1"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch15_2_conv1_relu"
type: "ReLU"
bottom: "branch15_2_conv1"
top: "branch15_2_conv1"
}
# 3x3 depthwise conv; no ReLU after this stage (BN+Scale only).
layer {
name: "branch15_2_conv2"
type: "ConvolutionDepthwise"
bottom: "branch15_2_conv1"
top: "branch15_2_conv2"
convolution_param {
num_output: 232
kernel_size: 3
stride: 1
pad: 1
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch15_2_conv2_bn"
type: "BatchNorm"
bottom: "branch15_2_conv2"
top: "branch15_2_conv2"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch15_2_conv2_scale"
bottom: "branch15_2_conv2"
top: "branch15_2_conv2"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch15_2_conv3"
type: "Convolution"
bottom: "branch15_2_conv2"
top: "branch15_2_conv3"
convolution_param {
num_output: 232
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch15_2_conv3_bn"
type: "BatchNorm"
bottom: "branch15_2_conv3"
top: "branch15_2_conv3"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch15_2_conv3_scale"
bottom: "branch15_2_conv3"
top: "branch15_2_conv3"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch15_2_conv3_relu"
type: "ReLU"
bottom: "branch15_2_conv3"
top: "branch15_2_conv3"
}
layer {
name: "concat15"
type: "Concat"
bottom: "branch15_1"
bottom: "branch15_2_conv3"
top: "concat15"
}
layer {
name: "shuffle15"
type: "ShuffleChannel"
bottom: "concat15"
top: "shuffle15"
shuffle_channel_param {
group: 2
}
}
# --- ShuffleNet-V2 stage-4 unit 16 (stride-1 basic unit, last unit of the stage) ---
# Structure identical to unit 15: slice at channel 232, transform the right
# branch (1x1 -> dw3x3 -> 1x1), concat, group-2 channel shuffle.
layer {
name: "slice16"
type: "Slice"
bottom: "shuffle15"
top: "branch16_1"
top: "branch16_2"
slice_param {
slice_point: 232
axis: 1
}
}
layer {
name: "branch16_2_conv1"
type: "Convolution"
bottom: "branch16_2"
top: "branch16_2_conv1"
convolution_param {
num_output: 232
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
# BatchNorm blobs frozen from gradient updates (maintained by running average).
layer {
name: "branch16_2_conv1_bn"
type: "BatchNorm"
bottom: "branch16_2_conv1"
top: "branch16_2_conv1"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
# Learnable gamma/beta paired with the BatchNorm above (identity init).
layer {
name: "branch16_2_conv1_scale"
bottom: "branch16_2_conv1"
top: "branch16_2_conv1"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch16_2_conv1_relu"
type: "ReLU"
bottom: "branch16_2_conv1"
top: "branch16_2_conv1"
}
# 3x3 depthwise conv, stride 1, pad 1; no ReLU afterwards (ShuffleNet-V2 design).
layer {
name: "branch16_2_conv2"
type: "ConvolutionDepthwise"
bottom: "branch16_2_conv1"
top: "branch16_2_conv2"
convolution_param {
num_output: 232
kernel_size: 3
stride: 1
pad: 1
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch16_2_conv2_bn"
type: "BatchNorm"
bottom: "branch16_2_conv2"
top: "branch16_2_conv2"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch16_2_conv2_scale"
bottom: "branch16_2_conv2"
top: "branch16_2_conv2"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch16_2_conv3"
type: "Convolution"
bottom: "branch16_2_conv2"
top: "branch16_2_conv3"
convolution_param {
num_output: 232
kernel_size: 1
stride: 1
pad: 0
bias_term: false
weight_filler {
type: "msra"
}
}
}
layer {
name: "branch16_2_conv3_bn"
type: "BatchNorm"
bottom: "branch16_2_conv3"
top: "branch16_2_conv3"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "branch16_2_conv3_scale"
bottom: "branch16_2_conv3"
top: "branch16_2_conv3"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "branch16_2_conv3_relu"
type: "ReLU"
bottom: "branch16_2_conv3"
top: "branch16_2_conv3"
}
layer {
name: "concat16"
type: "Concat"
bottom: "branch16_1"
bottom: "branch16_2_conv3"
top: "concat16"
}
# Final channel shuffle of the stage; output feeds the conv5 head.
layer {
name: "shuffle16"
type: "ShuffleChannel"
bottom: "concat16"
top: "shuffle16"
shuffle_channel_param {
group: 2
}
}
# --- Network head: conv5 (1x1 conv expanding to 1024 channels) + BN/Scale/ReLU ---
layer {
name: "conv5"
type: "Convolution"
bottom: "shuffle16"
top: "conv5"
convolution_param {
num_output: 1024
pad: 0
kernel_size: 1
stride: 1
bias_term: false
weight_filler {
type: "msra"
}
}
}
# BatchNorm blobs frozen from gradient updates (maintained by running average).
layer {
name: "conv5_bn"
type: "BatchNorm"
bottom: "conv5"
top: "conv5"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
# Learnable gamma/beta paired with the BatchNorm above (identity init).
layer {
name: "conv5_scale"
bottom: "conv5"
top: "conv5"
type: "Scale"
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "conv5_relu"
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
# Global average pooling: collapses the spatial dimensions of conv5 to 1x1,
# so the following 1x1 convolutions behave as fully-connected layers.
layer {
name: "pool_ave"
type: "Pooling"
bottom: "conv5"
top: "pool_ave"
pooling_param {
global_pooling : true
pool: AVE
}
}
# "fc1000": a 1x1 convolution on the globally-pooled 1x1 feature map, i.e. an
# FC layer with 1000 outputs. The name/size are inherited from the ImageNet
# classifier head; here it feeds the regression output layer.
# Bias gets 2x learning rate and no weight decay (standard Caffe convention).
layer {
name: "fc1000"
type: "Convolution"
bottom: "pool_ave"
top: "fc1000"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 1000
kernel_size: 1
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
value: 0
}
}
}
# Regression output: 1x1 conv producing 4 values (the regression targets).
# NOTE(review): there is no activation between fc1000 and fc4, so the two
# stacked 1x1 convs compose into a single linear map — confirm this is
# intentional rather than a missing ReLU.
layer {
name: "fc4"
type: "Convolution"
bottom: "fc1000"
top: "fc4"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 4
kernel_size: 1
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
value: 0
}
}
}
# Euclidean (sum-of-squares / L2) loss between the 4-dim prediction "fc4" and
# the ground-truth "label" blob — the regression objective of this network.
layer {
name: "loss"
type: "EuclideanLoss"
bottom: "fc4"
bottom: "label"
top: "loss"
}
结语
如果您有修改意见或问题,欢迎留言或者通过邮箱和我联系。
手打很辛苦,如果我的文章对您有帮助,转载请注明出处。