siamese-fc MATLAB tracking code walkthrough
The tracking part consists of the following code:
1.run_tracker.m
function run_tracker(video, visualization)
% RUN_TRACKER is the external function of the tracker - does initialization and calls tracker.m
%Initialization: set up the matconvnet, util and tracking paths
startup;
%% Parameters that should have no effect on the result.
%video sequence name/path
params.video = video;
%visualization flag
params.visualization = visualization;
%use the GPU
params.gpus = 1;
%% Parameters that should be recorded.
% params.foo = 'blah';
%% Call the main tracking function
%main tracking routine
tracker(params);
end
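As a quick usage example (the sequence name 'vot15_bag' is the default used in tracker.m; the model and sequence paths are assumed to be configured in env_paths_tracking.m), the tracker can be launched from the MATLAB prompt like this:
% Minimal usage sketch: track the 'vot15_bag' sequence with visualization enabled
run_tracker('vot15_bag', true);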
2.tracker.m
function bboxes = tracker(varargin)
% Hyper-parameters (defaults, can be overridden through varargin)
p.numScale = 3;% number of scales searched per frame
p.scaleStep = 1.0375;% ratio between consecutive scales
p.scalePenalty = 0.9745;% penalty applied to non-unit scale changes
p.scaleLR = 0.59; % learning rate (damping) for scale/size updates
p.responseUp = 16; % upsampling of the 17x17 response map, improves localization accuracy
p.windowing = 'cosine'; % window used to penalize large displacements
p.wInfluence = 0.176; % window influence (weight of the convex combination)
p.net = '2016-08-17_gray025.net.mat';% network model file name
% Execution, visualization and benchmark parameters
p.video = 'vot15_bag';% name of the sequence (sub-folder of the image data path)
p.visualization = false;% visualization disabled by default
p.gpus = 1;% use the GPU
p.bbox_output = false;% whether to write the target bounding box of every frame
p.fout = -1;% file handle used for the bbox output
% Parameters dictated by the network architecture; they must match the values used at training time
p.exemplarSize = 127; % exemplar (template) input size
p.instanceSize = 255; % instance (search region) input size
p.scoreSize = 17;% size of the score/confidence map (network output)
p.totalStride = 8; % total stride of the network
p.contextAmount = 0.5; % context amount for the exemplar
p.subMean = false; % whether to subtract the dataset mean from the crops
% SiamFC layer prefixes and variable ids
p.prefix_z = 'a_'; % prefix of the layers that process the exemplar z
p.prefix_x = 'b_'; % prefix of the layers that process the instance (search region) x
p.prefix_join = 'xcorr';
p.prefix_adj = 'adjust';
p.id_feat_z = 'a_feat';
p.id_score = 'score';
p = vl_argparse(p, varargin);% override the defaults above with the name/value pairs in varargin
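% Example (hypothetical call, relying on the standard MatConvNet vl_argparse
% behaviour of accepting name/value pairs as well as an options struct):
%   tracker('video', 'vot15_bag', 'numScale', 5, 'visualization', true);
% is equivalent to setting those fields in a struct and calling tracker(params),
% which is exactly what run_tracker.m does.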
% Get the environment-specific default paths
p = env_paths_tracking(p);% paths to the network model and the video sequences
% Load the ImageNet Video statistics (color statistics, e.g. the RGB mean, used for preprocessing the crops)
if exist(p.stats_path,'file')
stats = load(p.stats_path);
else
warning('No stats found at %s', p.stats_path);
stats = [];
end
% Load two copies of the pretrained model, one per branch (open question from the walkthrough: why is one copy moved to the GPU and the other not?)
net_z = load_pretrained([p.net_base_path p.net], p.gpus);
net_x = load_pretrained([p.net_base_path p.net], []);
% Load the video sequence: image frames, initial target position and initial target size
[imgFiles, targetPosition, targetSize] = load_video_info(p.seq_base_path, p.video);
nImgs = numel(imgFiles);% number of frames
startFrame = 1;% start frame
% Split the network into two branches
% The exemplar branch computes the target (z) features and is evaluated only once per video
remove_layers_from_prefix(net_z, p.prefix_x);% remove the prefix_x layers from net_z
remove_layers_from_prefix(net_z, p.prefix_join);% remove the prefix_join layers from net_z
remove_layers_from_prefix(net_z, p.prefix_adj);% remove the prefix_adj layers from net_z
% The instance branch computes the search-region (x) features and their cross-correlation with the z features
remove_layers_from_prefix(net_x, p.prefix_z);% remove the prefix_z layers from net_x
zFeatId = net_z.getVarIndex(p.id_feat_z);% index of the exemplar feature variable
scoreId = net_x.getVarIndex(p.id_score);% index of the score map variable
%Read the first frame, convert it to single precision (4 bytes per value) and move it to the GPU
im = gpuArray(single(imgFiles{startFrame}));
% if the image is grayscale, replicate the channel to match the 3-channel filters
if(size(im, 3)==1)
im = repmat(im, [1 1 3]);
end
% Initialize the video player
videoPlayer = [];
if p.visualization && isToolboxAvailable('Computer Vision System Toolbox')
videoPlayer = vision.VideoPlayer('Position', [100 100 [size(im,2), size(im,1)]+30]);
end
% Per-channel mean values, used for padding the crops
avgChans = gather([mean(mean(im(:,:,1))) mean(mean(im(:,:,2))) mean(mean(im(:,:,3)))]);
wc_z = targetSize(2) + p.contextAmount*sum(targetSize);% w + 0.5*(w+h)
hc_z = targetSize(1) + p.contextAmount*sum(targetSize);% h + 0.5*(w+h)
s_z = sqrt(wc_z*hc_z);
scale_z = p.exemplarSize / s_z;% 127 / s_z
% Initialize the exemplar
%crop the region around the initial target position and resize it to 127x127x3
[z_crop, ~] = get_subwindow_tracking(im, targetPosition, [p.exemplarSize p.exemplarSize], [round(s_z) round(s_z)], avgChans);
if p.subMean
z_crop = bsxfun(@minus, z_crop, reshape(stats.z.rgbMean, [1 1 3]));
end
d_search = (p.instanceSize - p.exemplarSize)/2;% (255-127)/2=64
pad = d_search/scale_z;% 64/scale_z
s_x = s_z + 2*pad;
% Allowed range for the search-region size
min_s_x = 0.2*s_x;
max_s_x = 5*s_x;
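% Worked example (illustrative numbers, not part of the original code):
% with an initial target of height 40 and width 80,
%   wc_z = 80 + 0.5*(40+80) = 140,   hc_z = 40 + 0.5*(40+80) = 100,
%   s_z = sqrt(140*100) ~= 118.3,    scale_z = 127/118.3 ~= 1.07,
%   pad = 64/1.07 ~= 59.6,           s_x = 118.3 + 2*59.6 ~= 237.6,
% i.e. a ~118x118 image patch is resized to the 127x127 exemplar, and a
% ~238x238 patch around the same centre becomes the 255x255 search region.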
switch p.windowing
case 'cosine'
window = single(hann(p.scoreSize*p.responseUp) * hann(p.scoreSize*p.responseUp)');
case 'uniform'
window = single(ones(p.scoreSize*p.responseUp, p.scoreSize*p.responseUp));
end
% normalize the window
window = window / sum(window(:));
% scales = [1/1.0375, 1, 1.0375]; ceil rounds up, floor rounds down
scales = (p.scaleStep .^ ((ceil(p.numScale/2)-p.numScale) : floor(p.numScale/2)));
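% Quick check of the scale factors (hypothetical snippet, not part of tracker.m):
%   scaleStep = 1.0375;
%   scaleStep .^ (-1:1)   % = [0.9639 1.0000 1.0375] for numScale = 3
%   scaleStep .^ (-2:2)   % = [0.9290 0.9639 1.0000 1.0375 1.0764] for numScale = 5
% i.e. the exponent range (ceil(numScale/2)-numScale):floor(numScale/2) is always
% centred on 0, so the unscaled search region is always among the candidates.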
% Evaluate the offline-trained exemplar branch on z to obtain the target features
net_z.eval({'exemplar', z_crop});% forward pass: z_crop is fed into the network input variable named 'exemplar'
z_features = net_z.vars(zFeatId).value;% read the exemplar features from the network
z_features = repmat(z_features, [1 1 1 p.numScale]);% replicate them once per scale so all scale crops are scored in one batch
% nImgs x 4 results (one bounding box per frame)
bboxes = zeros(nImgs, 4);
% Start the tracking loop
tic;
for i = startFrame:nImgs
if i>startFrame
% load the frame onto the GPU
im = gpuArray(single(imgFiles{i}));
% if grayscale, replicate the channel to match the 3-channel filters
if(size(im, 3)==1)
im = repmat(im, [1 1 3]);
end
%search-region size at each scale
scaledInstance = s_x .* scales;
%target size at each scale
scaledTarget = [targetSize(1) .* scales; targetSize(2) .* scales];
% extract a pyramid of crops around the previous target position
x_crops = make_scale_pyramid(im, targetPosition, scaledInstance, p.instanceSize, avgChans, stats, p);
% find the target position and scale with the maximum response
[newTargetPosition, newScale] = tracker_eval(net_x, round(s_x), scoreId, z_features, x_crops, targetPosition, window, p);
targetPosition = gather(newTargetPosition);
%update s_x (damped update, clamped to [min_s_x, max_s_x])
s_x = max(min_s_x, min(max_s_x, (1-p.scaleLR)*s_x + p.scaleLR*scaledInstance(newScale)));
% update the target size with the same damping
targetSize = (1-p.scaleLR)*targetSize + p.scaleLR*[scaledTarget(1,newScale) scaledTarget(2,newScale)];
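% Note (added for clarity): both updates above are exponential moving averages
% with learning rate scaleLR. Illustrative numbers with the default scaleLR = 0.59:
%   old height 40, measured height 46  ->  (1-0.59)*40 + 0.59*46 = 43.54,
% so the estimate moves most of the way towards the new measurement but keeps
% some memory of the previous size, which smooths out noisy scale estimates.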
else
% at the first frame output position and size passed as input (ground truth)
end
% Convert the centre-based [y x] position and [h w] size to a rectangle [x y w h] for drawing
rectPosition = [targetPosition([2,1]) - targetSize([2,1])/2, targetSize([2,1])];
% output bbox in the original frame coordinates
oTargetPosition = targetPosition; % .* frameSize ./ newFrameSize;
oTargetSize = targetSize; % .* frameSize ./ newFrameSize;
bboxes(i, :) = [oTargetPosition([2,1]) - oTargetSize([2,1])/2, oTargetSize([2,1])];
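% Note on conventions (added for clarity): targetPosition and targetSize are stored
% as [row col] = [y x] and [height width], while MATLAB rectangles use [x y w h]
% anchored at the top-left corner; hence the ([2,1]) index swap and the subtraction
% of half the size when building rectPosition and the bboxes rows above.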
if p.visualization
if isempty(videoPlayer)
figure(1), imshow(im/255);
figure(1), rectangle('Position', rectPosition, 'LineWidth', 4, 'EdgeColor', 'y');
drawnow
fprintf('Frame %d\n', startFrame+i);
else
im = gather(im)/255;
im = insertShape(im, 'Rectangle', rectPosition, 'LineWidth', 4, 'Color', 'yellow');
% Display the annotated video frame using the video player object.
step(videoPlayer, im);
end
end
if p.bbox_output
fprintf(p.fout,'%.2f,%.2f,%.2f,%.2f\n', bboxes(i, :));
end
end
bboxes = bboxes(startFrame : i, :);
end
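The returned bboxes matrix therefore holds one [x, y, width, height] row per frame, with (x, y) the top-left corner in image coordinates. A hypothetical post-processing sketch, assuming the model and sequence paths are set up by env_paths_tracking.m:
% Run the tracker headless and inspect how the predicted box width evolves
boxes = tracker('video', 'vot15_bag', 'visualization', false);
figure, plot(boxes(:, 3));
xlabel('frame'), ylabel('predicted box width (px)');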