Caffe2 - (25) Detectron utils functions (3)

1. lr_policy.py

"""Learning rate policies."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import numpy as np

from core.config import cfg


def get_lr_at_iter(it):
    """
    根据 cfg.SOLVER 中的设置来得到在第 it 次迭代时的学习率 learning rate.    
    """
    lr = get_lr_func()(it)
    if it < cfg.SOLVER.WARM_UP_ITERS:
        method = cfg.SOLVER.WARM_UP_METHOD
        if method == 'constant':
            warmup_factor = cfg.SOLVER.WARM_UP_FACTOR
        elif method == 'linear':
            alpha = it / cfg.SOLVER.WARM_UP_ITERS
            warmup_factor = cfg.SOLVER.WARM_UP_FACTOR * (1 - alpha) + alpha
        else:
            raise KeyError('Unknown SOLVER.WARM_UP_METHOD: {}'.format(method))
        lr *= warmup_factor
    return np.float32(lr)


# ----------------------------------------------------------------------------
# Learning rate policy functions
# ----------------------------------------------------------------------------

def lr_func_steps_with_lrs(cur_iter):
    """
    当 cfg.SOLVER.LR_POLICY = 'steps_with_lrs' 时,
    在指定的迭代次数将学习率改变为指定的值,如:

    cfg.SOLVER.MAX_ITER: 90
    cfg.SOLVER.STEPS:    [0,    60,    80]
    cfg.SOLVER.LRS:      [0.02, 0.002, 0.0002]
    for cur_iter in [0, 59]   use 0.02
                 in [60, 79]  use 0.002
                 in [80, inf] use 0.0002
    """
    ind = get_step_index(cur_iter)
    return cfg.SOLVER.LRS[ind]


def lr_func_steps_with_decay(cur_iter):
    """
    当 cfg.SOLVER.LR_POLICY = 'steps_with_decay' 时,
    基于公式:lr = base_lr * gamma ** lr_step_count 来改变指定迭代次数的学习率. 如:

    cfg.SOLVER.MAX_ITER: 90
    cfg.SOLVER.STEPS:    [0,    60,    80]
    cfg.SOLVER.BASE_LR:  0.02
    cfg.SOLVER.GAMMA:    0.1
    for cur_iter in [0, 59]   use 0.02 = 0.02 * 0.1 ** 0
                 in [60, 79]  use 0.002 = 0.02 * 0.1 ** 1
                 in [80, inf] use 0.0002 = 0.02 * 0.1 ** 2
    """
    ind = get_step_index(cur_iter)
    return cfg.SOLVER.BASE_LR * cfg.SOLVER.GAMMA ** ind


def lr_func_step(cur_iter):
    """
    当 cfg.SOLVER.LR_POLICY = 'step' 时,
    固定步长的去改变学习率:
    lr = base_lr * gamma * (cur_iter/step_size)
    """
    return (cfg.SOLVER.BASE_LR * cfg.SOLVER.GAMMA ** (cur_iter // cfg.SOLVER.STEP_SIZE))


# ----------------------------------------------------------------------------
# Helpers
# ----------------------------------------------------------------------------

def get_step_index(cur_iter):
    """
    Given an iteration, find which learning rate step we're at.
    """
    assert cfg.SOLVER.STEPS[0] == 0, 'The first step should always start at 0.'
    steps = cfg.SOLVER.STEPS + [cfg.SOLVER.MAX_ITER]
    for ind, step in enumerate(steps):  # NoQA
        if cur_iter < step:
            break
    return ind - 1


def get_lr_func():
    policy = 'lr_func_' + cfg.SOLVER.LR_POLICY
    if policy not in globals():
        raise NotImplementedError(
            'Unknown LR policy: {}'.format(cfg.SOLVER.LR_POLICY))
    else:
        return globals()[policy]
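
To make the schedules concrete, here is a minimal, self-contained sketch of the 'steps_with_decay' policy combined with linear warmup. The constants are illustrative stand-ins for the corresponding cfg.SOLVER options, not values from the real core.config:

BASE_LR = 0.02
GAMMA = 0.1
STEPS = [0, 60, 80]
MAX_ITER = 90
WARM_UP_ITERS = 5
WARM_UP_FACTOR = 1.0 / 3

def step_index(it):
    # Find which step interval `it` falls into (same logic as get_step_index).
    for ind, step in enumerate(STEPS + [MAX_ITER]):
        if it < step:
            break
    return ind - 1

def lr_at_iter(it):
    lr = BASE_LR * GAMMA ** step_index(it)          # steps_with_decay
    if it < WARM_UP_ITERS:                          # linear warmup
        alpha = it / float(WARM_UP_ITERS)
        lr *= WARM_UP_FACTOR * (1 - alpha) + alpha
    return lr

for it in [0, 2, 5, 59, 60, 80]:
    print(it, lr_at_iter(it))
# 0 -> 0.00667 (warmup), 2 -> 0.012, 5 -> 0.02, 59 -> 0.02, 60 -> 0.002, 80 -> 0.0002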

2. net.py

"""
Caffe2 网络所用到的 Helper 函数 (i.e., operator graphs).
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from collections import OrderedDict
import cPickle as pickle
import logging
import numpy as np
import os
import pprint
import yaml

from caffe2.python import core
from caffe2.python import workspace
from core.config import cfg
from utils.io import save_object
import utils.c2 as c2_utils

logger = logging.getLogger(__name__)


def initialize_from_weights_file(model, weights_file, broadcast=True):
    """
    从权重文件初始化权重.
    如果是多 GPUs,则所有 GPUs 加载的权重是相同的,除非设置 broadcast=False.
    """
    initialize_gpu_from_weights_file(model, weights_file, gpu_id=0) # 默认加载权重到 GPU 0.
    if broadcast:
        broadcast_parameters(model) # 


def initialize_gpu_from_weights_file(model, weights_file, gpu_id=0):
    """
    初始化 ops 网路到指定 gpu_id 的 GPU.
    如果使用 CUDA_VISIBLE_DEVICES 到指定 GPUs,则 Caffe2 会自动地将逻辑 GPU ids(从 0 开始) 映射到 CUDA_VISIBLE_DEVICES 指定的物理 GPUs.
    """
    logger.info('Loading from: {}'.format(weights_file))
    ws_blobs = workspace.Blobs()
    with open(weights_file, 'r') as f:
        src_blobs = pickle.load(f)
    if 'cfg' in src_blobs:
        saved_cfg = yaml.load(src_blobs['cfg'])
        configure_bbox_reg_weights(model, saved_cfg)  # for backwards compatibility
    if 'blobs' in src_blobs:
        # Backwards compat--dictionary used to be only blobs, now they are
        # stored under the 'blobs' key
        src_blobs = src_blobs['blobs']
    # Initialize weights on GPU gpu_id only
    unscoped_param_names = OrderedDict()  # Print these out in model order
    for blob in model.params:
        unscoped_param_names[c2_utils.UnscopeName(str(blob))] = True
    with c2_utils.NamedCudaScope(gpu_id):
        for unscoped_param_name in unscoped_param_names.keys():
            if (unscoped_param_name.find(']_') >= 0 and unscoped_param_name not in src_blobs):
                # Special case for initializing from a pretrained model:
                # if a blob named '_[xyz]_foo' is in model.params but not in
                # the initialization blob dict, then load blob 'foo' into
                # '_[xyz]_foo'.
                src_name = unscoped_param_name[unscoped_param_name.find(']_') + 2:]
            else:
                src_name = unscoped_param_name
            if src_name not in src_blobs:
                logger.info('{:s} not found'.format(src_name))
                continue
            dst_name = core.ScopedName(unscoped_param_name)
            has_momentum = src_name + '_momentum' in src_blobs
            has_momentum_str = ' [+ momentum]' if has_momentum else ''
            logger.info('{:s}{:} loaded from weights file into {:s}: {}'.
                        format(src_name, has_momentum_str, dst_name, src_blobs[src_name].shape))
            if dst_name in ws_blobs:
                # If the blob is already in the workspace, make sure that its
                # shape matches the shape of the loaded blob.
                ws_blob = workspace.FetchBlob(dst_name)
                assert ws_blob.shape == src_blobs[src_name].shape, \
                    ('Workspace blob {} with shape {} does not match '
                     'weights file shape {}').format(src_name, ws_blob.shape,
                                                     src_blobs[src_name].shape)
            workspace.FeedBlob(dst_name, src_blobs[src_name].astype(np.float32, copy=False))
            if has_momentum:
                workspace.FeedBlob(dst_name + '_momentum',
                                   src_blobs[src_name + '_momentum'].astype(np.float32, copy=False))

    # Blobs that are in the weights file but not used by the current model are
    # preserved: they are loaded into CPU memory under the '__preserve__/'
    # namescope and are also saved whenever the model is written back to a
    # weights file. This is useful for the alternating optimization used to
    # train Faster R-CNN: blobs unused in one step are carried forward and can
    # initialize another step.
    for src_name in src_blobs.keys():
        if (src_name not in unscoped_param_names and not src_name.endswith('_momentum') and
                    src_blobs[src_name] is not None):
            with c2_utils.CpuScope():
                workspace.FeedBlob('__preserve__/{:s}'.format(src_name), src_blobs[src_name])
                logger.info('{:s} preserved in workspace (unused)'.format(src_name))


def save_model_to_weights_file(weights_file, model):
    """
    将 model weights 保存为 dict 并序列化保存到文件 file.
    将 GPU device scoped names 映射到了 unscoped names (e.g., 'gpu_0/conv1_w' -> 'conv1_w').
    """
    logger.info('Saving parameters and momentum to {}'.format(os.path.abspath(weights_file)))
    blobs = {}
    # Save all parameters
    for param in model.params:
        scoped_name = str(param)
        unscoped_name = c2_utils.UnscopeName(scoped_name)
        if unscoped_name not in blobs:
            logger.debug(' {:s} -> {:s}'.format(scoped_name, unscoped_name))
            blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    # Save momentum
    for param in model.TrainableParams():
        scoped_name = str(param) + '_momentum'
        unscoped_name = c2_utils.UnscopeName(scoped_name)
        if unscoped_name not in blobs:
            logger.debug(' {:s} -> {:s}'.format(scoped_name, unscoped_name))
            blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    # Save preserved blobs
    for scoped_name in workspace.Blobs():
        if scoped_name.startswith('__preserve__/'):
            unscoped_name = c2_utils.UnscopeName(scoped_name)
            if unscoped_name not in blobs:
                logger.debug(' {:s} -> {:s} (preserved)'.format(scoped_name, unscoped_name))
                blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    cfg_yaml = yaml.dump(cfg)
    save_object(dict(blobs=blobs, cfg=cfg_yaml), weights_file)


def broadcast_parameters(model):
    """
    从 GPU 0 复制参数到 GPUs1 上对应的参数 blobs, cfg.NUM_GPUS - 1.
    """
    if cfg.NUM_GPUS == 1:
        # 单张 GPU 时无操作.
        return

    def _do_broadcast(all_blobs):
        assert len(all_blobs) % cfg.NUM_GPUS == 0, \
            ('Unexpected value for NUM_GPUS. Make sure you are not '
             'running single-GPU inference with NUM_GPUS > 1.')
        blobs_per_gpu = int(len(all_blobs) / cfg.NUM_GPUS)
        for i in range(blobs_per_gpu):
            blobs = [p for p in all_blobs[i::blobs_per_gpu]]
            data = workspace.FetchBlob(blobs[0])
            logger.debug('Broadcasting {} to'.format(str(blobs[0])))
            for i, p in enumerate(blobs[1:]):
                logger.debug(' |-> {}'.format(str(p)))
                with c2_utils.CudaScope(i + 1):
                    workspace.FeedBlob(p, data)

    _do_broadcast(model.params)
    _do_broadcast([b + '_momentum' for b in model.TrainableParams()])


def sum_multi_gpu_blob(blob_name):
    """
    返回标量scalar blob 在多个 GPUs 上的和.
    """
    val = 0
    for i in range(cfg.NUM_GPUS):
        val += float(workspace.FetchBlob('gpu_{}/{}'.format(i, blob_name)))
    return val


def average_multi_gpu_blob(blob_name):
    """
    返回标量scalar blob 在多个 GPUs 上的平均值.
    """
    return sum_multi_gpu_blob(blob_name) / cfg.NUM_GPUS


def print_net(model, namescope='gpu_0'):
    """
    打印网络.
    """
    logger.info('Printing model: {}'.format(model.net.Name()))
    op_list = model.net.Proto().op
    for op in op_list:
        input_name = op.input
        # For simplicity, only print the first output; not recommended when
        # an op has multiple outputs.
        output_name = str(op.output[0])
        op_type = op.type
        op_name = op.name

        if namescope is None or output_name.startswith(namescope):
            # Only print the forward-pass part of the network
            if output_name.find('grad') >= 0:
                break

            output_shape = workspace.FetchBlob(output_name).shape
            first_blob = True
            op_label = op_type + (op_name if op_name == '' else ':' + op_name)
            suffix = ' ------- (op: {})'.format(op_label)
            for j in range(len(input_name)):
                if input_name[j] in model.params:
                    continue
                input_blob = workspace.FetchBlob(input_name[j])
                if isinstance(input_blob, np.ndarray):
                    input_shape = input_blob.shape
                    logger.info('{:28s}: {:20s} => {:28s}: {:20s}{}'.format(
                        c2_utils.UnscopeName(str(input_name[j])),
                        '{}'.format(input_shape),
                        c2_utils.UnscopeName(str(output_name)),
                        '{}'.format(output_shape),
                        suffix))
                    if first_blob:
                        first_blob = False
                        suffix = ' ------|'
    logger.info('End of model: {}'.format(model.net.Name()))


def configure_bbox_reg_weights(model, saved_cfg):
    """
    保持与旧 models 的兼容性.
    old models trained with bounding box regression
    mean/std normalization (instead of fixed weights).
    """
    if 'MODEL' not in saved_cfg or 'BBOX_REG_WEIGHTS' not in saved_cfg.MODEL:
        logger.warning('Model from weights file was trained before config key '
                       'MODEL.BBOX_REG_WEIGHTS was added. Forcing '
                       'MODEL.BBOX_REG_WEIGHTS = (1., 1., 1., 1.) to ensure '
                       'correct **inference** behavior.')
        cfg.MODEL.BBOX_REG_WEIGHTS = (1., 1., 1., 1.)
        logger.info('New config:')
        logger.info(pprint.pformat(cfg))
        assert not model.train, (
            'This model was trained with an older version of the code that '
            'used bounding box regression mean/std normalization. It can no '
            'longer be used for training. To upgrade it to a trainable model '
            'please use fb/compat/convert_bbox_reg_normalized_model.py.'
        )
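
The device-scope handling above hinges on mapping names like 'gpu_0/conv1_w' back to 'conv1_w'. As a rough illustration, here is a hypothetical stand-in for c2_utils.UnscopeName (the real implementation may differ); it is essentially a prefix strip:

import re

def unscope_name(name):
    # Strip a leading 'gpu_<id>/' device scope: 'gpu_0/conv1_w' -> 'conv1_w'
    return re.sub(r'^gpu_\d+/', '', name)

assert unscope_name('gpu_0/conv1_w') == 'conv1_w'
assert unscope_name('conv1_w') == 'conv1_w'  # already-unscoped names pass through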

3. subprocess.py

"""
并行计算相关的; Primitives for running multiple single-GPU jobs in parallel over subranges of data.
用于运行 multi-GPU 推断 inference.
Subprocesses 用于避免 GIL,因为推断可能会涉及 non-trivial amounts of Python code.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import os
import yaml
import numpy as np
import subprocess
import cPickle as pickle
from six.moves import shlex_quote

from core.config import cfg

import logging
logger = logging.getLogger(__name__)


def process_in_parallel(tag, total_range_size, binary, output_dir):
    """
    并行地运行 cfg.NUM_GPUS 次指定的 binary,每一次作为一个 subprocess,使用一个 GPU.
    binary 必须可以接受命令行参数 `--range {start} {end}`,其指定了数据处理范围(data processing range).
    """
    # Snapshot the current cfg state in order to pass it to the inference subprocesses
    cfg_file = os.path.join(output_dir, '{}_range_config.yaml'.format(tag))
    with open(cfg_file, 'w') as f:
        yaml.dump(cfg, stream=f)
    subprocess_env = os.environ.copy()
    processes = []
    subinds = np.array_split(range(total_range_size), cfg.NUM_GPUS)
    for i in range(cfg.NUM_GPUS):
        start = subinds[i][0]
        end = subinds[i][-1] + 1
        subprocess_env['CUDA_VISIBLE_DEVICES'] = str(i)
        cmd = '{binary} --range {start} {end} --cfg {cfg_file} NUM_GPUS 1'
        cmd = cmd.format(
            binary=shlex_quote(binary),
            start=int(start),
            end=int(end),
            cfg_file=shlex_quote(cfg_file)
        )
        logger.info('{} range command {}: {}'.format(tag, i, cmd))
        if i == 0:
            subprocess_stdout = subprocess.PIPE
        else:
            filename = os.path.join(
                output_dir, '%s_range_%s_%s.stdout' % (tag, start, end)
            )
            subprocess_stdout = open(filename, 'w')  # NOQA (close below)
        p = subprocess.Popen(
            cmd,
            shell=True,
            env=subprocess_env,
            stdout=subprocess_stdout,
            stderr=subprocess.STDOUT,
            bufsize=1
        )
        processes.append((i, p, start, end, subprocess_stdout))

    # Log output from the inference processes and collate their results
    outputs = []
    for i, p, start, end, subprocess_stdout in processes:
        log_subprocess_output(i, p, output_dir, tag, start, end)
        if isinstance(subprocess_stdout, file):  # NOQA (Python 2 for now)
            subprocess_stdout.close()
        range_file = os.path.join(output_dir, '%s_range_%s_%s.pkl' % (tag, start, end))
        range_data = pickle.load(open(range_file))
        outputs.append(range_data)
    return outputs


def log_subprocess_output(i, p, output_dir, tag, start, end):
    """
    捕捉每个 subprocess 的输出,并将其记录在父进程(parent process)中.
    第一个 subprocess 的输出被实时记录.
    其它 subprocess 的输出被缓存,并在 subprocesses 结束时,一次性依次打印输出.
    """
    outfile = os.path.join(output_dir, '%s_range_%s_%s.stdout' % (tag, start, end) )
    logger.info('# ' + '-' * 76 + ' #')
    logger.info('stdout of subprocess %s with range [%s, %s]' % (i, start + 1, end) )
    logger.info('# ' + '-' * 76 + ' #')
    if i == 0:
        # Stream the stdout from the first subprocess in realtime
        with open(outfile, 'w') as f:
            for line in iter(p.stdout.readline, b''):
                print(line.rstrip())
                f.write(str(line))
        p.stdout.close()
        ret = p.wait()
    else:
        # For subprocesses >= 1, wait and dump their buffered log file
        ret = p.wait()
        with open(outfile, 'r') as f:
            print(''.join(f.readlines()))
    assert ret == 0, 'Range subprocess failed (exit code: {})'.format(ret)
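
The per-GPU subranges come straight from np.array_split over the index range; a quick illustration with arbitrary example values:

import numpy as np

total_range_size, NUM_GPUS = 10, 4
subinds = np.array_split(range(total_range_size), NUM_GPUS)
for i, inds in enumerate(subinds):
    start, end = inds[0], inds[-1] + 1  # half-open [start, end) range for GPU i
    print('GPU {}: --range {} {}'.format(i, start, end))
# GPU 0: --range 0 3
# GPU 1: --range 3 6
# GPU 2: --range 6 8
# GPU 3: --range 8 10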

4. timer.py

"""
计时相关的函数.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import time


class Timer(object):
    """A simple timer."""

    def __init__(self):
        self.reset()

    def tic(self):
        # using time.time instead of time.clock because time.clock
        # does not normalize for multithreading
        self.start_time = time.time()

    def toc(self, average=True):
        self.diff = time.time() - self.start_time
        self.total_time += self.diff
        self.calls += 1
        self.average_time = self.total_time / self.calls
        if average:
            return self.average_time
        else:
            return self.diff

    def reset(self):
        self.total_time = 0.
        self.calls = 0
        self.start_time = 0.
        self.diff = 0.
        self.average_time = 0.
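
Typical usage brackets the code being timed with tic()/toc(); a small sketch, reusing the module's time import (time.sleep stands in for real work):

t = Timer()
for _ in range(3):
    t.tic()
    time.sleep(0.01)      # stand-in for the work being timed
    t.toc(average=False)  # returns the duration of this call only
print(t.average_time)     # ~0.01: total_time / calls
print(t.calls)            # 3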

5. logging.py

"""
日志 logging 相关的函数
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from collections import deque
from email.mime.text import MIMEText
import json
import logging
import numpy as np
import smtplib
import sys

# Print lower-precision floating point values than the default FLOAT_REPR
json.encoder.FLOAT_REPR = lambda o: format(o, '.6f')


def log_json_stats(stats, sort_keys=True):
    print('json_stats: {:s}'.format(json.dumps(stats, sort_keys=sort_keys)))


class SmoothedValue(object):
    """
    跟踪一系列的值,并在一个窗口window 或全局序列平均上进行平滑值.
    """

    def __init__(self, window_size):
        self.deque = deque(maxlen=window_size)
        self.series = []
        self.total = 0.0
        self.count = 0

    def AddValue(self, value):
        self.deque.append(value)
        self.series.append(value)
        self.count += 1
        self.total += value

    def GetMedianValue(self):
        return np.median(self.deque)

    def GetAverageValue(self):
        return np.mean(self.deque)

    def GetGlobalAverageValue(self):
        return self.total / self.count


def send_email(subject, body, to):
    s = smtplib.SMTP('localhost')
    mime = MIMEText(body)
    mime['Subject'] = subject
    mime['To'] = to
    s.sendmail('detectron', to, mime.as_string())


def setup_logging(name):
    FORMAT = '%(levelname)s %(filename)s:%(lineno)4d: %(message)s'
    # Manually clear root loggers to prevent any module that may have called
    # logging.basicConfig() from blocking our logging setup
    logging.root.handlers = []
    logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)
    logger = logging.getLogger(name)
    return logger
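
A short sketch of SmoothedValue in use, with arbitrary values:

sv = SmoothedValue(window_size=3)
for v in [1.0, 2.0, 3.0, 4.0]:
    sv.AddValue(v)
print(sv.GetMedianValue())         # 3.0: median of the window [2.0, 3.0, 4.0]
print(sv.GetAverageValue())        # 3.0: mean of the window
print(sv.GetGlobalAverageValue())  # 2.5: mean over all values seen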

6. vis.py

"""
Detection 可视化结果输出模块.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import cv2
import numpy as np
import os

import pycocotools.mask as mask_util

from utils.colormap import colormap  # predefined color list
import utils.env as envu
import utils.keypoints as keypoint_utils # COCO keypoints

# Matplotlib requires certain adjustments in some environments
# Must happen before importing matplotlib
envu.set_up_matplotlib()
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon

plt.rcParams['pdf.fonttype'] = 42  # For editing in Adobe Illustrator


_GRAY = (218, 227, 218)
_GREEN = (18, 127, 15)
_WHITE = (255, 255, 255)


def kp_connections(keypoints):  # connectivity between keypoints
    kp_lines = [
        [keypoints.index('left_eye'), keypoints.index('right_eye')],
        [keypoints.index('left_eye'), keypoints.index('nose')],
        [keypoints.index('right_eye'), keypoints.index('nose')],
        [keypoints.index('right_eye'), keypoints.index('right_ear')],
        [keypoints.index('left_eye'), keypoints.index('left_ear')],
        [keypoints.index('right_shoulder'), keypoints.index('right_elbow')],
        [keypoints.index('right_elbow'), keypoints.index('right_wrist')],
        [keypoints.index('left_shoulder'), keypoints.index('left_elbow')],
        [keypoints.index('left_elbow'), keypoints.index('left_wrist')],
        [keypoints.index('right_hip'), keypoints.index('right_knee')],
        [keypoints.index('right_knee'), keypoints.index('right_ankle')],
        [keypoints.index('left_hip'), keypoints.index('left_knee')],
        [keypoints.index('left_knee'), keypoints.index('left_ankle')],
        [keypoints.index('right_shoulder'), keypoints.index('left_shoulder')],
        [keypoints.index('right_hip'), keypoints.index('left_hip')],
    ]
    return kp_lines


def convert_from_cls_format(cls_boxes, cls_segms, cls_keyps):
    """
    对测试代码生成的 class boxes/segms/keyps 格式进行转换,
    得到新的 boxes, segms, keyps, classes.
    """
    box_list = [b for b in cls_boxes if len(b) > 0]
    if len(box_list) > 0:
        boxes = np.concatenate(box_list)
    else:
        boxes = None
    if cls_segms is not None:
        segms = [s for slist in cls_segms for s in slist]
    else:
        segms = None
    if cls_keyps is not None:
        keyps = [k for klist in cls_keyps for k in klist]
    else:
        keyps = None
    classes = []
    for j in range(len(cls_boxes)):
        classes += [j] * len(cls_boxes[j])
    return boxes, segms, keyps, classes


def get_class_string(class_index, score, dataset):
    # class label string
    class_text = dataset.classes[class_index] if dataset is not None else \
        'id{:d}'.format(class_index)
    return class_text + ' {:0.2f}'.format(score).lstrip('0')


def vis_mask(img, mask, col, alpha=0.4, show_border=True, border_thick=1):
    """
    可视化单个二值 binary mask.
    """

    img = img.astype(np.float32)
    idx = np.nonzero(mask)

    img[idx[0], idx[1], :] *= 1.0 - alpha
    img[idx[0], idx[1], :] += alpha * col

    if show_border:  # draw the mask border
        _, contours, _ = cv2.findContours(mask.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
        cv2.drawContours(img, contours, -1, _WHITE, border_thick, cv2.LINE_AA)

    return img.astype(np.uint8)


def vis_class(img, pos, class_str, font_scale=0.35):
    """
    可视化类别 class.
    """
    x0, y0 = int(pos[0]), int(pos[1])
    # Compute text size.
    txt = class_str
    font = cv2.FONT_HERSHEY_SIMPLEX
    ((txt_w, txt_h), _) = cv2.getTextSize(txt, font, font_scale, 1)
    # Place text background.
    back_tl = x0, y0 - int(1.3 * txt_h)
    back_br = x0 + txt_w, y0
    cv2.rectangle(img, back_tl, back_br, _GREEN, -1)
    # Show text.
    txt_tl = x0, y0 - int(0.3 * txt_h)
    cv2.putText(img, txt, txt_tl, font, font_scale, _GRAY, lineType=cv2.LINE_AA)
    return img


def vis_bbox(img, bbox, thick=1):
    """
    可视化边界框 bounding box.
    """
    (x0, y0, w, h) = bbox
    x1, y1 = int(x0 + w), int(y0 + h)
    x0, y0 = int(x0), int(y0)
    cv2.rectangle(img, (x0, y0), (x1, y1), _GREEN, thickness=thick)
    return img


def vis_keypoints(img, kps, kp_thresh=2, alpha=0.7):
    """
    可视化 keypoints (修改自 vis_one_image).
    kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob).
    """
    dataset_keypoints, _ = keypoint_utils.get_keypoints()
    kp_lines = kp_connections(dataset_keypoints)

    # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv.
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]
    colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]

    # Perform the drawing on a copy of the image, to allow for blending.
    kp_mask = np.copy(img)

    # Draw mid shoulder / mid hip first for better visualization.
    mid_shoulder = (
        kps[:2, dataset_keypoints.index('right_shoulder')] +
        kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
    sc_mid_shoulder = np.minimum(
        kps[2, dataset_keypoints.index('right_shoulder')],
        kps[2, dataset_keypoints.index('left_shoulder')])
    mid_hip = (
        kps[:2, dataset_keypoints.index('right_hip')] +
        kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
    sc_mid_hip = np.minimum(
        kps[2, dataset_keypoints.index('right_hip')],
        kps[2, dataset_keypoints.index('left_hip')])
    nose_idx = dataset_keypoints.index('nose')
    if sc_mid_shoulder > kp_thresh and kps[2, nose_idx] > kp_thresh:
        cv2.line(
            kp_mask, tuple(mid_shoulder), tuple(kps[:2, nose_idx]),
            color=colors[len(kp_lines)], thickness=2, lineType=cv2.LINE_AA)
    if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
        cv2.line(
            kp_mask, tuple(mid_shoulder), tuple(mid_hip),
            color=colors[len(kp_lines) + 1], thickness=2, lineType=cv2.LINE_AA)

    # Draw the keypoints.
    for l in range(len(kp_lines)):
        i1 = kp_lines[l][0]
        i2 = kp_lines[l][1]
        p1 = kps[0, i1], kps[1, i1]
        p2 = kps[0, i2], kps[1, i2]
        if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
            cv2.line(kp_mask, p1, p2,
                     color=colors[l], thickness=2, lineType=cv2.LINE_AA)
        if kps[2, i1] > kp_thresh:
            cv2.circle(kp_mask, p1, radius=3, color=colors[l], 
                       thickness=-1, lineType=cv2.LINE_AA)
        if kps[2, i2] > kp_thresh:
            cv2.circle(kp_mask, p2, radius=3, color=colors[l], 
                       thickness=-1, lineType=cv2.LINE_AA)

    # Blend the keypoints.
    return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0)


def vis_one_image_opencv(im, boxes, segms=None, keypoints=None, thresh=0.9, kp_thresh=2, show_box=False, dataset=None, show_class=False):
    """
    Constructs a numpy array with the detections visualized.
    """

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return im

    if segms is not None and len(segms) > 0:
        masks = mask_util.decode(segms)
        color_list = colormap()
        mask_color_id = 0

    # Display in largest-to-smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)

    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        # show box (off by default)
        if show_box:
            im = vis_bbox(im, (bbox[0], bbox[1], 
                               bbox[2] - bbox[0], bbox[3] - bbox[1]))

        # show class (off by default)
        if show_class:
            class_str = get_class_string(classes[i], score, dataset)
            im = vis_class(im, (bbox[0], bbox[1] - 2), class_str)

        # show mask
        if segms is not None and len(segms) > i:
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1
            im = vis_mask(im, masks[..., i], color_mask)

        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            im = vis_keypoints(im, keypoints[i], kp_thresh)

    return im


def vis_one_image(im, im_name, output_dir, boxes, segms=None, keypoints=None, thresh=0.9, kp_thresh=2, dpi=200, box_alpha=0.0, dataset=None, show_class=False, ext='pdf'):
    """
    可视化检测结果(默认保存为pdf).
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return

    dataset_keypoints, _ = keypoint_utils.get_keypoints()

    if segms is not None and len(segms) > 0:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    # Display in largest-to-smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)

    mask_color_id = 0
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        # show box (off by default)
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False, edgecolor='g',
                          linewidth=0.5, alpha=box_alpha))

        # show class (off by default)
        if show_class:
            ax.text(
                bbox[0], bbox[1] - 2,
                get_class_string(classes[i], score, dataset),
                fontsize=3,
                family='serif',
                bbox=dict(facecolor='g', alpha=0.4, pad=0, edgecolor='none'),
                color='white')

        # show mask
        if segms is not None and len(segms) > i:
            img = np.ones(im.shape)
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1

            w_ratio = .4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            for c in range(3):
                img[:, :, c] = color_mask[c]
            e = masks[:, :, i]

            _, contour, hier = cv2.findContours(e.copy(), 
                                                cv2.RETR_CCOMP,
                                                cv2.CHAIN_APPROX_NONE)

            for c in contour:
                polygon = Polygon(c.reshape((-1, 2)),
                                  fill=True, facecolor=color_mask,
                                  edgecolor='w', linewidth=1.2, alpha=0.5)
                ax.add_patch(polygon)

        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            kps = keypoints[i]
            plt.autoscale(False)
            for l in range(len(kp_lines)):
                i1 = kp_lines[l][0]
                i2 = kp_lines[l][1]
                if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
                    x = [kps[0, i1], kps[0, i2]]
                    y = [kps[1, i1], kps[1, i2]]
                    line = plt.plot(x, y)
                    plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
                if kps[2, i1] > kp_thresh:
                    plt.plot(kps[0, i1], kps[1, i1], '.', color=colors[l],
                             markersize=3.0, alpha=0.7)

                if kps[2, i2] > kp_thresh:
                    plt.plot(kps[0, i2], kps[1, i2], '.', color=colors[l],
                             markersize=3.0, alpha=0.7)

            # add mid shoulder / mid hip for better visualization
            mid_shoulder = (
                kps[:2, dataset_keypoints.index('right_shoulder')] +
                kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
            sc_mid_shoulder = np.minimum(
                kps[2, dataset_keypoints.index('right_shoulder')],
                kps[2, dataset_keypoints.index('left_shoulder')])
            mid_hip = (
                kps[:2, dataset_keypoints.index('right_hip')] +
                kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
            sc_mid_hip = np.minimum(
                kps[2, dataset_keypoints.index('right_hip')],
                kps[2, dataset_keypoints.index('left_hip')])
            if (sc_mid_shoulder > kp_thresh and
                    kps[2, dataset_keypoints.index('nose')] > kp_thresh):
                x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]
                y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]
                line = plt.plot(x, y)
                plt.setp(line, color=colors[len(kp_lines)], linewidth=1.0,
                         alpha=0.7)
            if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
                x = [mid_shoulder[0], mid_hip[0]]
                y = [mid_shoulder[1], mid_hip[1]]
                line = plt.plot(x, y)
                plt.setp(line, color=colors[len(kp_lines) + 1], linewidth=1.0,
                         alpha=0.7)

    output_name = os.path.basename(im_name) + '.' + ext
    fig.savefig(os.path.join(output_dir, '{}'.format(output_name)), dpi=dpi)
    plt.close('all')
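
Both visualization entry points accept the raw class-indexed detections via convert_from_cls_format: cls_boxes is a list indexed by class id whose entries are (N, 5) arrays of [x1, y1, x2, y2, score] rows. A small sketch with made-up detections:

cls_boxes = [
    np.zeros((0, 5), dtype=np.float32),              # class 0 (background): empty
    np.array([[10, 10, 50, 80, 0.95]], np.float32),  # class 1: one detection
    np.array([[20, 30, 40, 60, 0.70]], np.float32),  # class 2: one detection
]
boxes, segms, keyps, classes = convert_from_cls_format(cls_boxes, None, None)
print(boxes.shape)  # (2, 5): all detections concatenated
print(classes)      # [1, 2]: class id for each row of boxes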

7. coordinator.py

"""
多线程/进程队列的协调.
shared multithreading/processing queue.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import contextlib
import logging
import Queue
import threading
import traceback

log = logging.getLogger(__name__)


class Coordinator(object):

    def __init__(self):
        self._event = threading.Event()

    def request_stop(self):
        log.debug('Coordinator stopping')
        self._event.set()

    def should_stop(self):
        return self._event.is_set()

    def wait_for_stop(self):
        return self._event.wait()

    @contextlib.contextmanager
    def stop_on_exception(self):
        try:
            yield
        except Exception:
            if not self.should_stop():
                traceback.print_exc()
                self.request_stop()


def coordinated_get(coordinator, queue):
    while not coordinator.should_stop():
        try:
            return queue.get(block=True, timeout=1.0)
        except Queue.Empty:
            continue
    raise Exception('Coordinator stopped during get()')


def coordinated_put(coordinator, queue, element):
    while not coordinator.should_stop():
        try:
            queue.put(element, block=True, timeout=1.0)
            return
        except Queue.Full:
            continue
    raise Exception('Coordinator stopped during put()')
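
A minimal producer/consumer sketch of the API above, relying on the module's threading and Queue imports (the queue size and item count are arbitrary):

coord = Coordinator()
q = Queue.Queue(maxsize=2)

def producer():
    with coord.stop_on_exception():
        for i in range(5):
            coordinated_put(coord, q, i)  # blocks politely while the queue is full

t = threading.Thread(target=producer)
t.start()
with coord.stop_on_exception():
    for _ in range(5):
        print(coordinated_get(coord, q))
    coord.request_stop()  # all items consumed; unblock anything still waiting
t.join()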

8. collections.py

"""
A simple attribute dictionary used for representing configuration options.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals


class AttrDict(dict):

    def __getattr__(self, name):
        if name in self.__dict__:
            return self.__dict__[name]
        elif name in self:
            return self[name]
        else:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        if name in self.__dict__:
            self.__dict__[name] = value
        else:
            self[name] = value
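
AttrDict lets configuration options be read and written with attribute syntax while remaining a plain dict, which is why the cfg built from it can be dumped with yaml as in net.py. A quick sketch:

cfg = AttrDict()
cfg.TRAIN = AttrDict()       # nested sections are just nested AttrDicts
cfg.TRAIN.BATCH_SIZE = 64    # stored as a dict entry, not an instance attribute
print(cfg['TRAIN']['BATCH_SIZE'])  # 64: key access and attribute access agree
print(isinstance(cfg, dict))       # True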