180725 get_anchors() in Mask-RCNN

Copyright notice: This is an original post by the author. Please credit the source when reposting: https://blog.csdn.net/qq_33039859/article/details/81203410


  • In Mask-RCNN, for an input image of fixed size, the coordinates and the number of candidate anchors are fixed;
  • For a fixed image size, all anchor positions can therefore be computed once in advance, which speeds up the computation. A quick count of how many anchors are involved is sketched right below.
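As a rough sanity check (a minimal standalone sketch, assuming the default Mask-RCNN configuration: BACKBONE_STRIDES = [4, 8, 16, 32, 64], three RPN_ANCHOR_RATIOS per location, and RPN_ANCHOR_STRIDE = 1), the total anchor count for a 1024×1024 input can be computed by hand:

import math

strides = [4, 8, 16, 32, 64]   # default BACKBONE_STRIDES
ratios_per_cell = 3            # default RPN_ANCHOR_RATIOS = [0.5, 1, 2]
h = w = 1024
total = sum(math.ceil(h / s) * math.ceil(w / s) * ratios_per_cell
            for s in strides)
print(total)                   # -> 261888

This matches the number of anchors returned by get_anchors() in the full script below.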
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 24 14:12:45 2018

@author: bruceelau
"""

import numpy as np
import matplotlib.pyplot as plt
import math
import os
import sys
# Make the Mask-RCNN repo root importable (assumes this script lives one level
# below the repo root, next to config.py and the mrcnn package)
p = os.path.abspath('../')
sys.path.append(p)
from config import Config
from mrcnn import utils
import random
import matplotlib.image as mpimg
import PIL.Image as Image
#%%
mode = 'training'
config = Config()   # instantiate the base Config; a real project would subclass it
model_dir = ''


def compute_backbone_shapes(config, image_shape):
    """Computes the width and height of each stage of the backbone network.

    Returns:
        [N, (height, width)]. Where N is the number of stages
    """
    if callable(config.BACKBONE):
        return config.COMPUTE_BACKBONE_SHAPE(image_shape)

    # Currently supports ResNet only
    assert config.BACKBONE in ["resnet50", "resnet101"]
    return np.array(
        [[int(math.ceil(image_shape[0] / stride)),
          int(math.ceil(image_shape[1] / stride))]
         for stride in config.BACKBONE_STRIDES])
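# For example, with the default BACKBONE_STRIDES = [4, 8, 16, 32, 64], a
# 1024x1024 input gives [[256, 256], [128, 128], [64, 64], [32, 32], [16, 16]].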

class MaskRCNN():
    """Encapsulates the Mask RCNN model functionality.

    The actual Keras model is in the keras_model property.
    """

    def __init__(self, mode, config, model_dir):
        """
        mode: Either "training" or "inference"
        config: A Sub-class of the Config class
        model_dir: Directory to save training logs and trained weights
        """
        assert mode in ['training', 'inference']
        self.mode = mode
        self.config = config
        self.model_dir = model_dir

    def get_anchors(self, image_shape):
        """Returns anchor pyramid for the given image size."""
        backbone_shapes = compute_backbone_shapes(self.config, image_shape)
        # Cache anchors and reuse if image shape is the same
        if not hasattr(self, "_anchor_cache"):
            self._anchor_cache = {}
        if not tuple(image_shape) in self._anchor_cache:
            # Generate Anchors
            a = utils.generate_pyramid_anchors(
                self.config.RPN_ANCHOR_SCALES,
                self.config.RPN_ANCHOR_RATIOS,
                backbone_shapes,
                self.config.BACKBONE_STRIDES,
                self.config.RPN_ANCHOR_STRIDE)
            # Keep a copy of the latest anchors in pixel coordinates because
            # it's used in inspect_model notebooks.
            # TODO: Remove this after the notebook are refactored to not use it
            self.anchors = a
            # Normalize coordinates
            self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])
        # Return both the normalized anchors and the pixel-coordinate copy.
        # (Returning self.anchors rather than the local `a` avoids a NameError
        # when the cache is hit on a repeated call with the same image shape.)
        return self._anchor_cache[tuple(image_shape)], self.anchors

c = MaskRCNN(mode=mode, config=config, model_dir=model_dir)
d1,d2 = c.get_anchors((1024,1024,3))
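# With the default Config, d1 holds the anchors in normalized [0, 1] coordinates
# and d2 the same anchors in pixel coordinates, both of shape (261888, 4) laid
# out as [y1, x1, y2, x2].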
#%%
# d2 rows are [y1, x1, y2, x2] in pixel coordinates. Convert each anchor to
# matplotlib's (x, y, width, height) form and shift by +512 so boxes centered
# on the 1024x1024 image land on the 2048x2048 canvas built below.
box = np.zeros(d2.shape)
box[:, 0] = d2[:, 1] + 512          # x1
box[:, 1] = d2[:, 0] + 512          # y1
box[:, 2] = d2[:, 3] - d2[:, 1]     # width  = x2 - x1
box[:, 3] = d2[:, 2] - d2[:, 0]     # height = y2 - y1
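# Sanity check (assuming the default RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512)
# and RPN_ANCHOR_RATIOS = [0.5, 1, 2]): the longest anchor side is
# 512 / sqrt(0.5) ≈ 724 px, so every shifted box fits on the 2048x2048 canvas.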
#%%
import matplotlib.patches as patches
# White 2048x2048 canvas with the 1024x1024 image pasted in the middle, so
# anchors that extend past the image borders remain visible.
bg = (np.ones((2048, 2048, 3)) * 255).astype('uint8')
im = Image.open('pandas.jpg')
im = im.resize((1024, 1024))
im = np.asarray(im)
bg[512:1536, 512:1536, :] = im    # matches the +512 shift applied to the boxes
fig = plt.figure(figsize=(15, 8))
for i in range(2):
    ax = fig.add_subplot(1, 2, i + 1)
    ax.xaxis.tick_top()
    ax.imshow(bg)
    # Left panel: a small sample drawn clearly; right panel: a large sample
    # with low alpha to show the overall anchor coverage.
    if i == 0:
        n, alpha = 100, 0.5
    else:
        n, alpha = 20000, 0.02
    r = random.sample(range(len(box)), n)    # sample n anchors without replacement
    for j in r:                              # j, not i: avoid shadowing the panel index
        rect = patches.Rectangle((box[j][0], box[j][1]), box[j][2], box[j][3],
                                 fill=False, alpha=alpha, color='r', lw=2)
        ax.add_patch(rect)
    ax.set_xlabel('The box number is %d' % n, color='r', size=20)
plt.savefig('boxex.png')
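If everything runs, the saved figure should show two panels over the padded canvas: the left one with 100 randomly sampled anchors drawn clearly, and the right one with 20,000 anchors at low alpha, which makes the dense multi-scale coverage of the anchor pyramid visible.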
