desc stringlengths 3 26.7k | decl stringlengths 11 7.89k | bodies stringlengths 8 553k |
|---|---|---|
'Returns row indices that match some column.
The indices returned by this op are ordered so as to be in correspondence
with the output of matched_column_indicator(). For example if
self.matched_column_indicator() is [0,2], and self.matched_row_indices() is
[7, 3], then we know that column 0 was matched to row 7 and co... | def matched_row_indices(self):
| return self._reshape_and_cast(tf.gather(self._match_results, self.matched_column_indices()))
|
'Computes matches among row and column indices and returns the result.
Computes matches among the row and column indices based on the similarity
matrix and optional arguments.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher value means more similar.
scope: Op scope name. Defa... | def match(self, similarity_matrix, scope=None, **params):
| with tf.name_scope(scope, 'Match', [similarity_matrix, params]) as scope:
return Match(self._match(similarity_matrix, **params))
|
'Method to be overriden by implementations.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher value means more similar.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
match_results: Integer tensor of shape [M]: match_results[i]>=0 m... | @abstractmethod
def _match(self, similarity_matrix, **params):
| pass
|
'Return a single image and associated labels.
Args:
data: a string tensor holding a serialized protocol buffer corresponding
to data for a single image.
Returns:
tensor_dict: a dictionary containing tensors. Possible keys are defined in
reader.Fields.'
| @abstractmethod
def decode(self, data):
| pass
|
'Name scope.
Must be defined by implementations.
Returns:
a string representing the name scope of the anchor generation operation.'
| @abstractmethod
def name_scope(self):
| pass
|
'Whether to dynamically check the number of anchors generated.
Can be overridden by implementations that would like to disable this
behavior.
Returns:
a boolean controlling whether the Generate function should dynamically
check the number of anchors generated against the mathematically
expected number of anchors.'
| @property
def check_num_anchors(self):
| return True
|
'Returns the number of anchors per spatial location.
Returns:
a list of integers, one for each expected feature map to be passed to
the `generate` function.'
| @abstractmethod
def num_anchors_per_location(self):
| pass
|
'Generates a collection of bounding boxes to be used as anchors.
TODO: remove **params from argument list and make stride and offsets (for
multiple_grid_anchor_generator) constructor arguments.
Args:
feature_map_shape_list: list of (height, width) pairs in the format
[(height_0, width_0), (height_1, width_1), ...] that... | def generate(self, feature_map_shape_list, **params):
| if (self.check_num_anchors and (len(feature_map_shape_list) != len(self.num_anchors_per_location()))):
raise ValueError('Number of feature maps is expected to equal the length of `num_anchors_per_location`.')
with tf.name_scope(self.name_scope()):
anchors = self.... |
'To be overridden by implementations.
Args:
feature_map_shape_list: list of (height, width) pairs in the format
[(height_0, width_0), (height_1, width_1), ...] that the generated
anchors must align with.
**params: parameters for anchor generation op
Returns:
boxes: a BoxList holding a collection of N anchor boxes'
| @abstractmethod
def _generate(self, feature_map_shape_list, **params):
| pass
|
'Assert that correct number of anchors was generated.
Args:
anchors: box_list.BoxList object holding anchors generated
feature_map_shape_list: list of (height, width) pairs in the format
[(height_0, width_0), (height_1, width_1), ...] that the generated
anchors must align with.
Returns:
Op that raises InvalidArgumentEr... | def _assert_correct_number_of_anchors(self, anchors, feature_map_shape_list):
| expected_num_anchors = 0
for (num_anchors_per_location, feature_map_shape) in zip(self.num_anchors_per_location(), feature_map_shape_list):
expected_num_anchors += ((num_anchors_per_location * feature_map_shape[0]) * feature_map_shape[1])
return tf.assert_equal(expected_num_anchors, anchors.num_boxe... |
'Constructs a minibatch sampler.'
| def __init__(self):
| pass
|
'Returns subsample of entries in indicator.
Args:
indicator: boolean tensor of shape [N] whose True entries can be sampled.
batch_size: desired batch size.
**params: additional keyword arguments for specific implementations of
the MinibatchSampler.
Returns:
sample_indicator: boolean tensor of shape [N] whose True entri... | @abstractmethod
def subsample(self, indicator, batch_size, **params):
| pass
|
'Subsample indicator vector.
Given a boolean indicator vector with M elements set to `True`, the function
assigns all but `num_samples` of these previously `True` elements to
`False`. If `num_samples` is greater than M, the original indicator vector
is returned.
Args:
indicator: a 1-dimensional boolean tensor indicatin... | @staticmethod
def subsample_indicator(indicator, num_samples):
| indices = tf.where(indicator)
indices = tf.random_shuffle(indices)
indices = tf.reshape(indices, [(-1)])
num_samples = tf.minimum(tf.size(indices), num_samples)
selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1]))
selected_indicator = ops.indices_to_dense_vector(selected_indic... |
'Input preprocessing, resizes images to 28x28.
Args:
inputs: a [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.'
| def preprocess(self, inputs):
| return tf.image.resize_images(inputs, [28, 28])
|
'Prediction tensors from inputs tensor.
Args:
preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.
Returns:
prediction_dict: a dictionary holding prediction tensors to be
passed to the Loss or Postprocess functions.'
| def predict(self, preprocessed_inputs):
| flattened_inputs = tf.contrib.layers.flatten(preprocessed_inputs)
class_prediction = tf.contrib.layers.fully_connected(flattened_inputs, self._num_classes)
box_prediction = tf.contrib.layers.fully_connected(flattened_inputs, 4)
return {'class_predictions_with_background': tf.reshape(class_prediction, [(... |
'Convert predicted output tensors to final detections. Unused.
Args:
prediction_dict: a dictionary holding prediction tensors.
**params: Additional keyword arguments for specific implementations of
DetectionModel.
Returns:
detections: a dictionary with empty fields.'
| def postprocess(self, prediction_dict, **params):
| return {'detection_boxes': None, 'detection_scores': None, 'detection_classes': None, 'num_detections': None}
|
'Compute scalar loss tensors with respect to provided groundtruth.
Calling this function requires that groundtruth tensors have been
provided via the provide_groundtruth function.
Args:
prediction_dict: a dictionary holding predicted tensors
Returns:
a dictionary mapping strings (loss names) to scalar tensors represent... | def loss(self, prediction_dict):
| batch_reg_targets = tf.stack(self.groundtruth_lists(fields.BoxListFields.boxes))
batch_cls_targets = tf.stack(self.groundtruth_lists(fields.BoxListFields.classes))
weights = tf.constant(1.0, dtype=tf.float32, shape=[len(self.groundtruth_lists(fields.BoxListFields.boxes)), 1])
location_losses = self._loc... |
'Returns a map of variables to load from a foreign checkpoint.
Args:
from_detection_checkpoint: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Returns:
A dict mapping variable names to variables.'
| def restore_map(self, from_detection_checkpoint=True):
| return {var.op.name: var for var in tf.global_variables()}
|
'Helper function to assert if a proto field equals some value.
Args:
proto_field: The protobuf field to compare.
expectation: The expected value of the protobuf field.'
| def _assertProtoEqual(self, proto_field, expectation):
| proto_list = [p for p in proto_field]
self.assertListEqual(proto_list, expectation)
|
'Constructor sets keys_to_features and items_to_handlers.'
| def __init__(self):
| self.keys_to_features = {'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''), 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'), 'image/filename': tf.FixedLenFeature((), tf.string, default_value=''), 'image/key/sha256': tf.FixedLenFeature((), tf.string, default_value=''), 'ima... |
'Decodes serialized tensorflow example and returns a tensor dictionary.
Args:
tf_example_string_tensor: a string tensor holding a serialized tensorflow
example proto.
Returns:
A dictionary of the following tensors.
fields.InputDataFields.image - 3D uint8 tensor of shape [None, None, 3]
containing image.
fields.InputDat... | def decode(self, tf_example_string_tensor):
| serialized_example = tf.reshape(tf_example_string_tensor, shape=[])
decoder = slim_example_decoder.TFExampleDecoder(self.keys_to_features, self.items_to_handlers)
keys = decoder.list_items()
tensors = decoder.decode(serialized_example, items=keys)
tensor_dict = dict(zip(keys, tensors))
is_crowd ... |
'Reshape instance segmentation masks.
The instance segmentation masks are reshaped to [num_instances, height,
width] and cast to boolean type to save memory.
Args:
keys_to_tensors: a dictionary from keys to tensors.
Returns:
A 3-D boolean tensor of shape [num_instances, height, width].'
| def _reshape_instance_masks(self, keys_to_tensors):
| masks = keys_to_tensors['image/segmentation/object']
if isinstance(masks, tf.SparseTensor):
masks = tf.sparse_tensor_to_dense(masks)
height = keys_to_tensors['image/height']
width = keys_to_tensors['image/width']
to_shape = tf.cast(tf.stack([(-1), height, width]), tf.int32)
return tf.cas... |
'Constructor for FasterRcnnBoxCoder.
Args:
scale_factors: List of 4 positive scalars to scale ty, tx, th and tw.
If set to None, does not perform scaling. For Faster RCNN,
the open-source implementation recommends using [10.0, 10.0, 5.0, 5.0].'
| def __init__(self, scale_factors=None):
| if scale_factors:
assert (len(scale_factors) == 4)
for scalar in scale_factors:
assert (scalar > 0)
self._scale_factors = scale_factors
|
'Encode a box collection with respect to anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded.
anchors: BoxList of anchors.
Returns:
a tensor representing N anchor-encoded boxes of the format
[ty, tx, th, tw].'
| def _encode(self, boxes, anchors):
| (ycenter_a, xcenter_a, ha, wa) = anchors.get_center_coordinates_and_sizes()
(ycenter, xcenter, h, w) = boxes.get_center_coordinates_and_sizes()
ha += EPSILON
wa += EPSILON
h += EPSILON
w += EPSILON
tx = ((xcenter - xcenter_a) / wa)
ty = ((ycenter - ycenter_a) / ha)
tw = tf.log((w / w... |
'Decode relative codes to boxes.
Args:
rel_codes: a tensor representing N anchor-encoded boxes.
anchors: BoxList of anchors.
Returns:
boxes: BoxList holding N bounding boxes.'
| def _decode(self, rel_codes, anchors):
| (ycenter_a, xcenter_a, ha, wa) = anchors.get_center_coordinates_and_sizes()
(ty, tx, th, tw) = tf.unstack(tf.transpose(rel_codes))
if self._scale_factors:
ty /= self._scale_factors[0]
tx /= self._scale_factors[1]
th /= self._scale_factors[2]
tw /= self._scale_factors[3]
w... |
'Encode a box collection with respect to anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded.
anchors: BoxList of N anchors. We assume that anchors has an associated
stddev field.
Returns:
a tensor representing N anchor-encoded boxes
Raises:
ValueError: if the anchors BoxList does not have a stddev f... | def _encode(self, boxes, anchors):
| if (not anchors.has_field('stddev')):
raise ValueError('anchors must have a stddev field')
box_corners = boxes.get()
means = anchors.get()
stddev = anchors.get_field('stddev')
return ((box_corners - means) / stddev)
|
'Decode.
Args:
rel_codes: a tensor representing N anchor-encoded boxes.
anchors: BoxList of anchors. We assume that anchors has an associated
stddev field.
Returns:
boxes: BoxList holding N bounding boxes
Raises:
ValueError: if the anchors BoxList does not have a stddev field'
| def _decode(self, rel_codes, anchors):
| if (not anchors.has_field('stddev')):
raise ValueError('anchors must have a stddev field')
means = anchors.get()
stddevs = anchors.get_field('stddev')
box_corners = ((rel_codes * stddevs) + means)
return box_list.BoxList(box_corners)
|
'Constructor for SquareBoxCoder.
Args:
scale_factors: List of 3 positive scalars to scale ty, tx, and tl.
If set to None, does not perform scaling. For faster RCNN,
the open-source implementation recommends using [10.0, 10.0, 5.0].
Raises:
ValueError: If scale_factors is not length 3 or contains values less than
or equ... | def __init__(self, scale_factors=None):
| if scale_factors:
if (len(scale_factors) != 3):
raise ValueError('The argument scale_factors must be a list of length 3.')
if any(((scalar <= 0) for scalar in scale_factors)):
raise ValueError('The values in scale_factors must all ... |
'Encodes a box collection with respect to an anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded.
anchors: BoxList of anchors.
Returns:
a tensor representing N anchor-encoded boxes of the format
[ty, tx, tl].'
| def _encode(self, boxes, anchors):
| (ycenter_a, xcenter_a, ha, wa) = anchors.get_center_coordinates_and_sizes()
la = tf.sqrt((ha * wa))
(ycenter, xcenter, h, w) = boxes.get_center_coordinates_and_sizes()
l = tf.sqrt((h * w))
la += EPSILON
l += EPSILON
tx = ((xcenter - xcenter_a) / la)
ty = ((ycenter - ycenter_a) / la)
... |
'Decodes relative codes to boxes.
Args:
rel_codes: a tensor representing N anchor-encoded boxes.
anchors: BoxList of anchors.
Returns:
boxes: BoxList holding N bounding boxes.'
| def _decode(self, rel_codes, anchors):
| (ycenter_a, xcenter_a, ha, wa) = anchors.get_center_coordinates_and_sizes()
la = tf.sqrt((ha * wa))
(ty, tx, tl) = tf.unstack(tf.transpose(rel_codes))
if self._scale_factors:
ty /= self._scale_factors[0]
tx /= self._scale_factors[1]
tl /= self._scale_factors[2]
l = (tf.exp(tl... |
'Constructor for KeypointBoxCoder.
Args:
num_keypoints: Number of keypoints to encode/decode.
scale_factors: List of 4 positive scalars to scale ty, tx, th and tw.
In addition to scaling ty and tx, the first 2 scalars are used to scale
the y and x coordinates of the keypoints as well. If set to None, does
not perform s... | def __init__(self, num_keypoints, scale_factors=None):
| self._num_keypoints = num_keypoints
if scale_factors:
assert (len(scale_factors) == 4)
for scalar in scale_factors:
assert (scalar > 0)
self._scale_factors = scale_factors
self._keypoint_scale_factors = None
if (scale_factors is not None):
self._keypoint_scale_fac... |
'Encode a box and keypoint collection with respect to anchor collection.
Args:
boxes: BoxList holding N boxes and keypoints to be encoded. Boxes are
tensors with the shape [N, 4], and keypoints are tensors with the shape
[N, num_keypoints, 2].
anchors: BoxList of anchors.
Returns:
a tensor representing N anchor-encoded... | def _encode(self, boxes, anchors):
| (ycenter_a, xcenter_a, ha, wa) = anchors.get_center_coordinates_and_sizes()
(ycenter, xcenter, h, w) = boxes.get_center_coordinates_and_sizes()
keypoints = boxes.get_field(fields.BoxListFields.keypoints)
keypoints = tf.transpose(tf.reshape(keypoints, [(-1), (self._num_keypoints * 2)]))
num_boxes = b... |
'Decode relative codes to boxes and keypoints.
Args:
rel_codes: a tensor with shape [N, 4 + 2 * num_keypoints] representing N
anchor-encoded boxes and keypoints
anchors: BoxList of anchors.
Returns:
boxes: BoxList holding N bounding boxes and keypoints.'
| def _decode(self, rel_codes, anchors):
| (ycenter_a, xcenter_a, ha, wa) = anchors.get_center_coordinates_and_sizes()
num_codes = tf.shape(rel_codes)[0]
result = tf.unstack(tf.transpose(rel_codes))
(ty, tx, th, tw) = result[:4]
tkeypoints = result[4:]
if self._scale_factors:
ty /= self._scale_factors[0]
tx /= self._scale... |
'Builds a 1x1 anchor grid to test the size of the output boxes.'
| def test_construct_single_anchor(self):
| scales = [0.5, 1.0, 2.0]
aspect_ratios = [0.25, 1.0, 4.0]
anchor_offset = [7, (-3)]
exp_anchor_corners = [[(-121), (-35), 135, 29], [(-249), (-67), 263, 61], [(-505), (-131), 519, 125], [(-57), (-67), 71, 61], [(-121), (-131), 135, 125], [(-249), (-259), 263, 253], [(-25), (-131), 39, 125], [(-57), (-25... |
'Constructs a GridAnchorGenerator.
Args:
scales: a list of (float) scales, default=(0.5, 1.0, 2.0)
aspect_ratios: a list of (float) aspect ratios, default=(0.5, 1.0, 2.0)
base_anchor_size: base anchor size as height, width (
(length-2 float32 list, default=[256, 256])
anchor_stride: difference in centers between base a... | def __init__(self, scales=(0.5, 1.0, 2.0), aspect_ratios=(0.5, 1.0, 2.0), base_anchor_size=None, anchor_stride=None, anchor_offset=None):
| if (base_anchor_size is None):
base_anchor_size = [256, 256]
base_anchor_size = tf.constant(base_anchor_size, tf.float32)
if (anchor_stride is None):
anchor_stride = [16, 16]
anchor_stride = tf.constant(anchor_stride, dtype=tf.float32)
if (anchor_offset is None):
anchor_offse... |
'Returns the number of anchors per spatial location.
Returns:
a list of integers, one for each expected feature map to be passed to
the `generate` function.'
| def num_anchors_per_location(self):
| return [(len(self._scales) * len(self._aspect_ratios))]
|
'Generates a collection of bounding boxes to be used as anchors.
Args:
feature_map_shape_list: list of pairs of convnet layer resolutions in the
format [(height_0, width_0)]. For example, setting
feature_map_shape_list=[(8, 8)] asks for anchors that correspond
to an 8x8 layer. For this anchor generator, only lists of... | def _generate(self, feature_map_shape_list):
| if (not (isinstance(feature_map_shape_list, list) and (len(feature_map_shape_list) == 1))):
raise ValueError('feature_map_shape_list must be a list of length 1.')
if (not all([(isinstance(list_item, tuple) and (len(list_item) == 2)) for list_item in feature_map_shape_list])):
... |
'Builds a 1x1 anchor grid to test the size of the output boxes.'
| def test_construct_single_anchor_grid(self):
| exp_anchor_corners = [[(-121), (-35), 135, 29], [(-249), (-67), 263, 61], [(-505), (-131), 519, 125], [(-57), (-67), 71, 61], [(-121), (-131), 135, 125], [(-249), (-259), 263, 253], [(-25), (-131), 39, 125], [(-57), (-259), 71, 253], [(-121), (-515), 135, 509]]
base_anchor_size = tf.constant([256, 256], dtype=t... |
'Constructs a MultipleGridAnchorGenerator.
To construct anchors, at multiple grid resolutions, one must provide a
list of feature_map_shape_list (e.g., [(8, 8), (4, 4)]), and for each grid
size, a corresponding list of (scale, aspect ratio) box specifications.
For example:
box_specs_list = [[(.1, 1.0), (.1, 2.0)], # f... | def __init__(self, box_specs_list, base_anchor_size=None, clip_window=None):
| if (isinstance(box_specs_list, list) and all([isinstance(list_item, list) for list_item in box_specs_list])):
self._box_specs = box_specs_list
else:
raise ValueError('box_specs_list is expected to be a list of lists of pairs')
if (base_anchor_size is None):
... |
'Returns the number of anchors per spatial location.
Returns:
a list of integers, one for each expected feature map to be passed to
the Generate function.'
| def num_anchors_per_location(self):
| return [len(box_specs) for box_specs in self._box_specs]
|
'Generates a collection of bounding boxes to be used as anchors.
The number of anchors generated for a single grid with shape MxM where we
place k boxes over each grid center is k*M^2 and thus the total number of
anchors is the sum over all grids. In our box_specs_list example
(see the constructor docstring), we would ... | def _generate(self, feature_map_shape_list, im_height=1, im_width=1, anchor_strides=None, anchor_offsets=None):
| if (not (isinstance(feature_map_shape_list, list) and (len(feature_map_shape_list) == len(self._box_specs)))):
raise ValueError('feature_map_shape_list must be a list with the same length as self._box_specs')
if (not all([(isinstance(list_item, tuple) and (len(list_item) ==... |
'Bipartite matches a collection rows and columns. A greedy bi-partite.
TODO: Add num_valid_columns options to match only that many columns with
all the rows.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher values mean more similar.
num_valid_rows: A scalar or a 1-D tensor wit... | def _match(self, similarity_matrix, num_valid_rows=(-1)):
| distance_matrix = ((-1) * similarity_matrix)
(_, match_results) = image_ops.bipartite_match(distance_matrix, num_valid_rows)
match_results = tf.reshape(match_results, [(-1)])
match_results = tf.cast(match_results, tf.int32)
return match_results
|
'Construct ArgMaxMatcher.
Args:
matched_threshold: Threshold for positive matches. Positive if
sim >= matched_threshold, where sim is the maximum value of the
similarity matrix for a given column. Set to None for no threshold.
unmatched_threshold: Threshold for negative matches. Negative if
sim < unmatched_threshold. D... | def __init__(self, matched_threshold, unmatched_threshold=None, negatives_lower_than_unmatched=True, force_match_for_each_row=False):
| if ((matched_threshold is None) and (unmatched_threshold is not None)):
raise ValueError('Need to also define matched_threshold whenunmatched_threshold is defined')
self._matched_threshold = matched_threshold
if (unmatched_threshold is None):
self._unmatched_threshold = ... |
'Tries to match each column of the similarity matrix to a row.
Args:
similarity_matrix: tensor of shape [N, M] representing any similarity
metric.
Returns:
Match object with corresponding matches for each of M columns.'
| def _match(self, similarity_matrix):
| def _match_when_rows_are_empty():
"Performs matching when the rows of similarity matrix are empty.\n\n When the rows are empty, all detections are false positives. So we return\n a tensor of ... |
'Set the indicated fields of x to val.
Args:
x: tensor.
indicator: boolean with same shape as x.
val: scalar with value to set.
Returns:
modified tensor.'
| def _set_values_using_indicator(self, x, indicator, val):
| indicator = tf.cast(indicator, x.dtype)
return tf.add(tf.multiply(x, (1 - indicator)), (val * indicator))
|
'Helper to check if two dicts with floatst or integers are close.'
| def assert_dictionary_close(self, dict1, dict2):
| self.assertEqual(sorted(dict1.keys()), sorted(dict2.keys()))
for key in dict1:
value = dict1[key]
if isinstance(value, float):
self.assertAlmostEqual(value, dict2[key])
else:
self.assertEqual(value, dict2[key])
|
'Builds a DetectionModel based on the model config.
Args:
model_config: A model.proto object containing the config for the desired
DetectionModel.
Returns:
DetectionModel based on the config.'
| def create_model(self, model_config):
| return model_builder.build(model_config, is_training=True)
|
'Initialized PerImageEvaluation by evaluation parameters.
Args:
num_groundtruth_classes: Number of ground truth object classes
matching_iou_threshold: A ratio of area intersection to union, which is
the threshold to consider whether a detection is true positive or not
nms_iou_threshold: IOU threshold used in Non Maximu... | def __init__(self, num_groundtruth_classes, matching_iou_threshold=0.5, nms_iou_threshold=0.3, nms_max_output_boxes=50):
| self.matching_iou_threshold = matching_iou_threshold
self.nms_iou_threshold = nms_iou_threshold
self.nms_max_output_boxes = nms_max_output_boxes
self.num_groundtruth_classes = num_groundtruth_classes
|
'Compute Object Detection related metrics from a single image.
Args:
detected_boxes: A float numpy array of shape [N, 4], representing N
regions of detected object regions.
Each row is of the format [y_min, x_min, y_max, x_max]
detected_scores: A float numpy array of shape [N, 1], representing
the confidence scores of ... | def compute_object_detection_metrics(self, detected_boxes, detected_scores, detected_class_labels, groundtruth_boxes, groundtruth_class_labels, groundtruth_is_difficult_lists):
| (detected_boxes, detected_scores, detected_class_labels) = self._remove_invalid_boxes(detected_boxes, detected_scores, detected_class_labels)
(scores, tp_fp_labels) = self._compute_tp_fp(detected_boxes, detected_scores, detected_class_labels, groundtruth_boxes, groundtruth_class_labels, groundtruth_is_difficult... |
'Compute CorLoc score for object detection result.
Args:
detected_boxes: A float numpy array of shape [N, 4], representing N
regions of detected object regions.
Each row is of the format [y_min, x_min, y_max, x_max]
detected_scores: A float numpy array of shape [N, 1], representing
the confidence scores of the detected... | def _compute_cor_loc(self, detected_boxes, detected_scores, detected_class_labels, groundtruth_boxes, groundtruth_class_labels):
| is_class_correctly_detected_in_image = np.zeros(self.num_groundtruth_classes, dtype=int)
for i in range(self.num_groundtruth_classes):
gt_boxes_at_ith_class = groundtruth_boxes[(groundtruth_class_labels == i), :]
detected_boxes_at_ith_class = detected_boxes[(detected_class_labels == i), :]
... |
'Compute CorLoc score for a single class.
Args:
detected_boxes: A numpy array of shape [N, 4] representing detected box
coordinates
detected_scores: A 1-d numpy array of length N representing classification
score
groundtruth_boxes: A numpy array of shape [M, 4] representing ground truth
box coordinates
Returns:
is_clas... | def _compute_is_aclass_correctly_detected_in_image(self, detected_boxes, detected_scores, groundtruth_boxes):
| if (detected_boxes.size > 0):
if (groundtruth_boxes.size > 0):
max_score_id = np.argmax(detected_scores)
detected_boxlist = np_box_list.BoxList(np.expand_dims(detected_boxes[max_score_id, :], axis=0))
gt_boxlist = np_box_list.BoxList(groundtruth_boxes)
iou = n... |
'Labels true/false positives of detections of an image across all classes.
Args:
detected_boxes: A float numpy array of shape [N, 4], representing N
regions of detected object regions.
Each row is of the format [y_min, x_min, y_max, x_max]
detected_scores: A float numpy array of shape [N, 1], representing
the confidenc... | def _compute_tp_fp(self, detected_boxes, detected_scores, detected_class_labels, groundtruth_boxes, groundtruth_class_labels, groundtruth_is_difficult_lists):
| result_scores = []
result_tp_fp_labels = []
for i in range(self.num_groundtruth_classes):
gt_boxes_at_ith_class = groundtruth_boxes[(groundtruth_class_labels == i), :]
groundtruth_is_difficult_list_at_ith_class = groundtruth_is_difficult_lists[(groundtruth_class_labels == i)]
detecte... |
'Labels boxes detected with the same class from the same image as tp/fp.
Args:
detected_boxes: A numpy array of shape [N, 4] representing detected box
coordinates
detected_scores: A 1-d numpy array of length N representing classification
score
groundtruth_boxes: A numpy array of shape [M, 4] representing ground truth
b... | def _compute_tp_fp_for_single_class(self, detected_boxes, detected_scores, groundtruth_boxes, groundtruth_is_difficult_list):
| if (detected_boxes.size == 0):
return (np.array([], dtype=float), np.array([], dtype=bool))
detected_boxlist = np_box_list.BoxList(detected_boxes)
detected_boxlist.add_field('scores', detected_scores)
detected_boxlist = np_box_list_ops.non_max_suppression(detected_boxlist, self.nms_max_output_bo... |
'This function creates an image that can be used to test vis functions.
It makes an image composed of four colored rectangles.
Returns:
colorful test numpy array image.'
| def create_colorful_test_image(self):
| ch255 = np.full([100, 200, 1], 255, dtype=np.uint8)
ch128 = np.full([100, 200, 1], 128, dtype=np.uint8)
ch0 = np.full([100, 200, 1], 0, dtype=np.uint8)
imr = np.concatenate((ch255, ch128, ch128), axis=2)
img = np.concatenate((ch255, ch255, ch0), axis=2)
imb = np.concatenate((ch255, ch0, ch255), ... |
'Add ground truth info of a single image into the evaluation database.
Args:
image_key: sha256 key of image content
groundtruth_boxes: A numpy array of shape [M, 4] representing object box
coordinates[y_min, x_min, y_max, x_max]
groundtruth_class_labels: A 1-d numpy array of length M representing class
labels
groundtru... | def add_single_ground_truth_image_info(self, image_key, groundtruth_boxes, groundtruth_class_labels, groundtruth_is_difficult_list=None):
| if (image_key in self.groundtruth_boxes):
logging.warn('image %s has already been added to the ground truth database.', image_key)
return
self.groundtruth_boxes[image_key] = groundtruth_boxes
self.groundtruth_class_labels[image_key] = groundtruth_class_labels
... |
'Add detected result of a single image into the evaluation database.
Args:
image_key: sha256 key of image content
detected_boxes: A numpy array of shape [N, 4] representing detected box
coordinates[y_min, x_min, y_max, x_max]
detected_scores: A 1-d numpy array of length N representing classification
score
detected_clas... | def add_single_detected_image_info(self, image_key, detected_boxes, detected_scores, detected_class_labels):
| if ((len(detected_boxes) != len(detected_scores)) or (len(detected_boxes) != len(detected_class_labels))):
raise ValueError(('detected_boxes, detected_scores and detected_class_labels should all have same lengths. Got[%d, %d, %d]' % len(detected_boxes)), len(detected_scores)... |
'Update grouth truth statitistics.
1. Difficult boxes are ignored when counting the number of ground truth
instances as done in Pascal VOC devkit.
2. Difficult boxes are treated as normal boxes when computing CorLoc related
statitistics.
Args:
groundtruth_class_labels: An integer numpy array of length M,
representing M... | def _update_ground_truth_statistics(self, groundtruth_class_labels, groundtruth_is_difficult_list):
| for class_index in range(self.num_class):
num_gt_instances = np.sum((groundtruth_class_labels[(~ groundtruth_is_difficult_list)] == class_index))
self.num_gt_instances_per_class[class_index] += num_gt_instances
if np.any((groundtruth_class_labels == class_index)):
self.num_gt_img... |
'Compute evaluation result.
Returns:
average_precision_per_class: float numpy array of average precision for
each class.
mean_ap: mean average precision of all classes, float scalar
precisions_per_class: List of precisions, each precision is a float numpy
array
recalls_per_class: List of recalls, each recall is a float... | def evaluate(self):
| if (self.num_gt_instances_per_class == 0).any():
logging.warn('The following classes have no ground truth examples: %s', np.squeeze(np.argwhere((self.num_gt_instances_per_class == 0))))
for class_index in range(self.num_class):
if (self.num_gt_instances_per_class[class_in... |
'Tests if a good pyramid image is created.'
| def test_diagonal_gradient_image(self):
| pyramid_image = test_utils.create_diagonal_gradient_image(3, 4, 2)
expected_first_channel = np.array([[3, 2, 1, 0], [4, 3, 2, 1], [5, 4, 3, 2]], dtype=np.float32)
self.assertAllEqual(np.squeeze(pyramid_image[:, :, 0]), expected_first_channel)
expected_image = np.array([[[3, 30], [2, 20], [1, 10], [0, 0]... |
'Tests if valid random boxes are created.'
| def test_random_boxes(self):
| num_boxes = 1000
max_height = 3
max_width = 5
boxes = test_utils.create_random_boxes(num_boxes, max_height, max_width)
true_column = (np.ones(shape=num_boxes) == 1)
self.assertAllEqual((boxes[:, 0] < boxes[:, 2]), true_column)
self.assertAllEqual((boxes[:, 1] < boxes[:, 3]), true_column)
... |
'Constructs box collection.
Args:
data: a numpy array of shape [N, 4] representing box coordinates
Raises:
ValueError: if bbox data is not a numpy array
ValueError: if invalid dimensions for bbox data'
| def __init__(self, data):
| if (not isinstance(data, np.ndarray)):
raise ValueError('data must be a numpy array.')
if ((len(data.shape) != 2) or (data.shape[1] != 4)):
raise ValueError('Invalid dimensions for box data.')
if ((data.dtype != np.float32) and (data.dtype != np.float64)):
... |
'Return number of boxes held in collections.'
| def num_boxes(self):
| return self.data['boxes'].shape[0]
|
'Return all non-box fields.'
| def get_extra_fields(self):
| return [k for k in self.data.keys() if (k != 'boxes')]
|
'Add data to a specified field.
Args:
field: a string parameter used to speficy a related field to be accessed.
field_data: a numpy array of [N, ...] representing the data associated
with the field.
Raises:
ValueError: if the field is already exist or the dimension of the field
data does not matches the number of boxes... | def add_field(self, field, field_data):
| if self.has_field(field):
raise ValueError((('Field ' + field) + 'already exists'))
if ((len(field_data.shape) < 1) or (field_data.shape[0] != self.num_boxes())):
raise ValueError('Invalid dimensions for field data')
self.data[field] = field_data
|
'Convenience function for accesssing box coordinates.
Returns:
a numpy array of shape [N, 4] representing box corners'
| def get(self):
| return self.get_field('boxes')
|
'Accesses data associated with the specified field in the box collection.
Args:
field: a string parameter used to speficy a related field to be accessed.
Returns:
a numpy 1-d array representing data of an associated field
Raises:
ValueError: if invalid field'
| def get_field(self, field):
| if (not self.has_field(field)):
raise ValueError('field {} does not exist'.format(field))
return self.data[field]
|
'Get corner coordinates of boxes.
Returns:
a list of 4 1-d numpy arrays [y_min, x_min, y_max, x_max]'
| def get_coordinates(self):
| box_coordinates = self.get()
y_min = box_coordinates[:, 0]
x_min = box_coordinates[:, 1]
y_max = box_coordinates[:, 2]
x_max = box_coordinates[:, 3]
return [y_min, x_min, y_max, x_max]
|
'Check whether data fullfills the format of N*[ymin, xmin, ymax, xmin].
Args:
data: a numpy array of shape [N, 4] representing box coordinates
Returns:
a boolean indicating whether all ymax of boxes are equal or greater than
ymin, and all xmax of boxes are equal or greater than xmin.'
| def _is_valid_boxes(self, data):
| if (data.shape[0] > 0):
for i in moves.range(data.shape[0]):
if ((data[(i, 0)] > data[(i, 2)]) or (data[(i, 1)] > data[(i, 3)])):
return False
return True
|
'Tests meshgrid op with vectors, for which it should match numpy.'
| def test_meshgrid_numpy_comparison(self):
| x = np.arange(4)
y = np.arange(6)
(exp_xgrid, exp_ygrid) = np.meshgrid(x, y)
(xgrid, ygrid) = ops.meshgrid(x, y)
with self.test_session() as sess:
(xgrid_output, ygrid_output) = sess.run([xgrid, ygrid])
self.assertAllEqual(xgrid_output, exp_xgrid)
self.assertAllEqual(ygrid_ou... |
'Constructor.
Args:
is_training: A boolean indicating whether the training version of the
computation graph should be constructed.
first_stage_features_stride: Output stride of extracted RPN feature map.
reuse_weights: Whether to reuse variables. Default is None.
weight_decay: float weight decay for feature extractor (... | def __init__(self, is_training, first_stage_features_stride, reuse_weights=None, weight_decay=0.0):
| self._is_training = is_training
self._first_stage_features_stride = first_stage_features_stride
self._reuse_weights = reuse_weights
self._weight_decay = weight_decay
|
'Feature-extractor specific preprocessing (minus image resizing).'
@abstractmethod
def preprocess(self, resized_inputs):
    """Feature-extractor specific preprocessing (minus image resizing).

    Args:
      resized_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.
    """
    pass
|
'Extracts first stage RPN features.
This function is responsible for extracting feature maps from preprocessed
images. These features are used by the region proposal network (RPN) to
predict proposals.
Args:
preprocessed_inputs: A [batch, height, width, channels] float tensor
representing a batch of images.
def extract_proposal_features(self, preprocessed_inputs, scope):
    """Extracts first stage RPN features.

    Wraps the subclass implementation in a variable scope so RPN feature
    extraction variables are grouped under ``scope``.

    Args:
      preprocessed_inputs: A [batch, height, width, channels] float tensor
        representing a batch of images.
      scope: A scope name.

    Returns:
      The feature map produced by ``_extract_proposal_features``.
    """
    feature_scope = tf.variable_scope(scope, values=[preprocessed_inputs])
    with feature_scope:
        return self._extract_proposal_features(preprocessed_inputs, scope)
|
'Extracts first stage RPN features, to be overridden.'
@abstractmethod
def _extract_proposal_features(self, preprocessed_inputs, scope):
    """Extracts first stage RPN features, to be overridden.

    Args:
      preprocessed_inputs: A [batch, height, width, channels] float tensor
        representing a batch of images.
      scope: A scope name.
    """
    pass
|
'Extracts second stage box classifier features.
Args:
proposal_feature_maps: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
scope: A scope name.
Returns:
def extract_box_classifier_features(self, proposal_feature_maps, scope):
    """Extracts second stage box classifier features.

    Wraps the subclass implementation in a variable scope so box classifier
    feature extraction variables are grouped under ``scope``.

    Args:
      proposal_feature_maps: A 4-D float tensor with shape
        [batch_size * self.max_num_proposals, crop_height, crop_width, depth]
        representing the feature map cropped to each proposal.
      scope: A scope name.

    Returns:
      The feature map produced by ``_extract_box_classifier_features``.
    """
    classifier_scope = tf.variable_scope(scope, values=[proposal_feature_maps])
    with classifier_scope:
        return self._extract_box_classifier_features(proposal_feature_maps, scope)
|
'Extracts second stage box classifier features, to be overridden.'
@abstractmethod
def _extract_box_classifier_features(self, proposal_feature_maps, scope):
    """Extracts second stage box classifier features, to be overridden.

    Args:
      proposal_feature_maps: A 4-D float tensor with shape
        [batch_size * self.max_num_proposals, crop_height, crop_width, depth]
        representing the feature map cropped to each proposal.
      scope: A scope name.
    """
    pass
|
'Returns a map of variables to load from a foreign checkpoint.
Args:
first_stage_feature_extractor_scope: A scope name for the first stage
feature extractor.
second_stage_feature_extractor_scope: A scope name for the second stage
feature extractor.
Returns:
A dict mapping variable names (to load from a checkpoint) to v... | def restore_from_classification_checkpoint_fn(self, first_stage_feature_extractor_scope, second_stage_feature_extractor_scope):
| variables_to_restore = {}
for variable in tf.global_variables():
for scope_name in [first_stage_feature_extractor_scope, second_stage_feature_extractor_scope]:
if variable.op.name.startswith(scope_name):
var_name = variable.op.name.replace((scope_name + '/'), '')
... |
'FasterRCNNMetaArch Constructor.
Args:
is_training: A boolean indicating whether the training version of the
computation graph should be constructed.
num_classes: Number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K ... | def __init__(self, is_training, num_classes, image_resizer_fn, feature_extractor, first_stage_only, first_stage_anchor_generator, first_stage_atrous_rate, first_stage_box_predictor_arg_scope, first_stage_box_predictor_kernel_size, first_stage_box_predictor_depth, first_stage_minibatch_size, first_stage_positive_balance... | super(FasterRCNNMetaArch, self).__init__(num_classes=num_classes)
if (second_stage_batch_size > first_stage_max_proposals):
raise ValueError('second_stage_batch_size should be no greater than first_stage_max_proposals.')
if (not isinstance(first_stage_anchor_generator, grid_anchor_... |
'Max number of proposals (to pad to) for each image in the input batch.
At training time, this is set to be the `second_stage_batch_size` if hard
example miner is not configured, else it is set to
`first_stage_max_proposals`. At inference time, this is always set to
`first_stage_max_proposals`.
Returns:
@property
def max_num_proposals(self):
    """Max number of proposals (to pad to) for each image in the input batch.

    At training time, this is set to be the `second_stage_batch_size` if hard
    example miner is not configured, else it is set to
    `first_stage_max_proposals`. At inference time, this is always set to
    `first_stage_max_proposals`.

    Returns:
      A positive integer.
    """
    use_second_stage_size = self._is_training and not self._hard_example_miner
    return (self._second_stage_batch_size if use_second_stage_size
            else self._first_stage_max_proposals)
|
'Feature-extractor specific preprocessing.
See base class.
For Faster R-CNN, we perform image resizing in the base class --- each
class subclassing FasterRCNNMetaArch is responsible for any additional
preprocessing (e.g., scaling pixel values to be in [-1, 1]).
Args:
inputs: a [batch, height_in, width_in, channels] flo... | def preprocess(self, inputs):
| if (inputs.dtype is not tf.float32):
raise ValueError('`preprocess` expects a tf.float32 tensor')
with tf.name_scope('Preprocessor'):
resized_inputs = tf.map_fn(self._image_resizer_fn, elems=inputs, dtype=tf.float32, parallel_iterations=self._parallel_iterations)
return self.... |
'Predicts unpostprocessed tensors from input tensor.
This function takes an input batch of images and runs it through the
forward pass of the network to yield "raw" un-postprocessed predictions.
If `first_stage_only` is True, this function only returns first stage
RPN predictions (un-postprocessed). Otherwise it retur... | def predict(self, preprocessed_inputs):
| (rpn_box_predictor_features, rpn_features_to_crop, anchors_boxlist, image_shape) = self._extract_rpn_feature_maps(preprocessed_inputs)
(rpn_box_encodings, rpn_objectness_predictions_with_background) = self._predict_rpn_proposals(rpn_box_predictor_features)
clip_window = tf.to_float(tf.stack([0, 0, image_sha... |
'Predicts the output tensors from second stage of Faster R-CNN.
Args:
rpn_box_encodings: 4-D float tensor of shape
[batch_size, num_valid_anchors, self._box_coder.code_size] containing
predicted boxes.
rpn_objectness_predictions_with_background: 2-D float tensor of shape
[batch_size, num_valid_anchors, 2] containing cl... | def _predict_second_stage(self, rpn_box_encodings, rpn_objectness_predictions_with_background, rpn_features_to_crop, anchors, image_shape):
| (proposal_boxes_normalized, _, num_proposals) = self._postprocess_rpn(rpn_box_encodings, rpn_objectness_predictions_with_background, anchors, image_shape)
flattened_proposal_feature_maps = self._compute_second_stage_input_feature_maps(rpn_features_to_crop, proposal_boxes_normalized)
box_classifier_features ... |
'Extracts RPN features.
This function extracts two feature maps: a feature map to be directly
fed to a box predictor (to predict location and objectness scores for
proposals) and a feature map from which to crop regions which will then
be sent to the second stage box classifier.
Args:
preprocessed_inputs: a [batch, hei... | def _extract_rpn_feature_maps(self, preprocessed_inputs):
| image_shape = tf.shape(preprocessed_inputs)
rpn_features_to_crop = self._feature_extractor.extract_proposal_features(preprocessed_inputs, scope=self.first_stage_feature_extractor_scope)
feature_map_shape = tf.shape(rpn_features_to_crop)
anchors = self._first_stage_anchor_generator.generate([(feature_map... |
'Adds box predictors to RPN feature map to predict proposals.
Note resulting tensors will not have been postprocessed.
Args:
rpn_box_predictor_features: A 4-D float32 tensor with shape
[batch, height, width, depth] to be used for predicting proposal boxes
and corresponding objectness scores.
Returns:
box_encodings: 3-D... | def _predict_rpn_proposals(self, rpn_box_predictor_features):
| num_anchors_per_location = self._first_stage_anchor_generator.num_anchors_per_location()
if (len(num_anchors_per_location) != 1):
raise RuntimeError('anchor_generator is expected to generate anchors corresponding to a single feature map.')
box_predictions = self._fir... |
'Removes anchors that (partially) fall outside an image.
Also removes associated box encodings and objectness predictions.
Args:
box_encodings: 3-D float tensor of shape
[batch_size, num_anchors, self._box_coder.code_size] containing
predicted boxes.
objectness_predictions_with_background: 3-D float tensor of shape
[ba... | def _remove_invalid_anchors_and_predictions(self, box_encodings, objectness_predictions_with_background, anchors_boxlist, clip_window):
| (pruned_anchors_boxlist, keep_indices) = box_list_ops.prune_outside_window(anchors_boxlist, clip_window)
def _batch_gather_kept_indices(predictions_tensor):
return tf.map_fn(partial(tf.gather, indices=keep_indices), elems=predictions_tensor, dtype=tf.float32, parallel_iterations=self._parallel_iteration... |
'Flattens `K-d` tensor along batch dimension to be a `(K-1)-d` tensor.
Converts `inputs` with shape [A, B, ..., depth] into a tensor of shape
[A * B, ..., depth].
Args:
inputs: A float tensor with shape [A, B, ..., depth]. Note that the first
two and last dimensions must be statically defined.
Returns:
def _flatten_first_two_dimensions(self, inputs):
    """Flattens `K-d` tensor along batch dimension to be a `(K-1)-d` tensor.

    Converts `inputs` with shape [A, B, ..., depth] into a tensor of shape
    [A * B, ..., depth]. Note that the first two and last dimensions must be
    statically defined.

    Args:
      inputs: A float tensor with shape [A, B, ..., depth].

    Returns:
      A float tensor with shape [A * B, ..., depth].
    """
    shape = shape_utils.combined_static_and_dynamic_shape(inputs)
    merged_leading_dim = shape[0] * shape[1]
    target_shape = tf.stack([merged_leading_dim] + shape[2:])
    return tf.reshape(inputs, target_shape)
|
'Convert prediction tensors to final detections.
This function converts raw predictions tensors to final detection results.
See base class for output format conventions. Note also that by default,
scores are to be interpreted as logits, but if a score_converter is used,
then scores are remapped (and may thus have a di... | def postprocess(self, prediction_dict):
| with tf.name_scope('FirstStagePostprocessor'):
image_shape = prediction_dict['image_shape']
if self._first_stage_only:
(proposal_boxes, proposal_scores, num_proposals) = self._postprocess_rpn(prediction_dict['rpn_box_encodings'], prediction_dict['rpn_objectness_predictions_with_backgroun... |
'Converts first stage prediction tensors from the RPN to proposals.
This function decodes the raw RPN predictions, runs non-max suppression
on the result.
Note that the behavior of this function is slightly modified during
training --- specifically, we stop the gradient from passing through the
proposal boxes and we on... | def _postprocess_rpn(self, rpn_box_encodings_batch, rpn_objectness_predictions_with_background_batch, anchors, image_shape):
| rpn_box_encodings_batch = tf.expand_dims(rpn_box_encodings_batch, axis=2)
rpn_encodings_shape = shape_utils.combined_static_and_dynamic_shape(rpn_box_encodings_batch)
tiled_anchor_boxes = tf.tile(tf.expand_dims(anchors, 0), [rpn_encodings_shape[0], 1, 1])
proposal_boxes = self._batch_decode_boxes(rpn_bo... |
'Unpads proposals and samples a minibatch for second stage.
Args:
proposal_boxes: A float tensor with shape
[batch_size, num_proposals, 4] representing the (potentially zero
padded) proposal boxes for all images in the batch. These boxes are
represented as normalized coordinates.
proposal_scores: A float tensor with ... | def _unpad_proposals_and_sample_box_classifier_batch(self, proposal_boxes, proposal_scores, num_proposals, groundtruth_boxlists, groundtruth_classes_with_background_list):
| single_image_proposal_box_sample = []
single_image_proposal_score_sample = []
single_image_num_proposals_sample = []
for (single_image_proposal_boxes, single_image_proposal_scores, single_image_num_proposals, single_image_groundtruth_boxlist, single_image_groundtruth_classes_with_background) in zip(tf.u... |
'Helper function for preparing groundtruth data for target assignment.
In order to be consistent with the model.DetectionModel interface,
groundtruth boxes are specified in normalized coordinates and classes are
specified as label indices with no assumed background category. To prepare
for target assignment, we:
1) co... | def _format_groundtruth_data(self, image_shape):
| groundtruth_boxlists = [box_list_ops.to_absolute_coordinates(box_list.BoxList(boxes), image_shape[1], image_shape[2]) for boxes in self.groundtruth_lists(fields.BoxListFields.boxes)]
groundtruth_classes_with_background_list = [tf.to_float(tf.pad(one_hot_encoding, [[0, 0], [1, 0]], mode='CONSTANT')) for one_hot_... |
'Samples a mini-batch of proposals to be sent to the box classifier.
Helper function for self._postprocess_rpn.
Args:
proposal_boxlist: A BoxList containing K proposal boxes in absolute
coordinates.
groundtruth_boxlist: A Boxlist containing N groundtruth object boxes in
absolute coordinates.
groundtruth_classes_with_ba... | def _sample_box_classifier_minibatch(self, proposal_boxlist, groundtruth_boxlist, groundtruth_classes_with_background):
| (cls_targets, cls_weights, _, _, _) = self._detector_target_assigner.assign(proposal_boxlist, groundtruth_boxlist, groundtruth_classes_with_background)
cls_weights += tf.to_float(tf.equal(tf.reduce_sum(cls_weights), 0))
positive_indicator = tf.greater(tf.argmax(cls_targets, axis=1), 0)
sampled_indices =... |
'Crops to a set of proposals from the feature map for a batch of images.
Helper function for self._postprocess_rpn. This function calls
`tf.image.crop_and_resize` to create the feature map to be passed to the
second stage box classifier for each proposal.
Args:
features_to_crop: A float32 tensor with shape
[batch_size,... | def _compute_second_stage_input_feature_maps(self, features_to_crop, proposal_boxes_normalized):
| def get_box_inds(proposals):
proposals_shape = proposals.get_shape().as_list()
if any(((dim is None) for dim in proposals_shape)):
proposals_shape = tf.shape(proposals)
ones_mat = tf.ones(proposals_shape[:2], dtype=tf.int32)
multiplier = tf.expand_dims(tf.range(start=0, l... |
'Converts predictions from the second stage box classifier to detections.
Args:
refined_box_encodings: a 3-D tensor with shape
[total_num_padded_proposals, num_classes, 4] representing predicted
(final) refined box encodings.
class_predictions_with_background: a 3-D tensor with shape
[total_num_padded_proposals, num_cl... | def _postprocess_box_classifier(self, refined_box_encodings, class_predictions_with_background, proposal_boxes, num_proposals, image_shape, mask_predictions=None, mask_threshold=0.5):
| refined_box_encodings_batch = tf.reshape(refined_box_encodings, [(-1), self.max_num_proposals, self.num_classes, self._box_coder.code_size])
class_predictions_with_background_batch = tf.reshape(class_predictions_with_background, [(-1), self.max_num_proposals, (self.num_classes + 1)])
refined_decoded_boxes_b... |
'Decode tensor of refined box encodings.
Args:
refined_box_encodings: a 3-D tensor with shape
[batch_size, max_num_proposals, num_classes, self._box_coder.code_size]
representing predicted (final) refined box encodings.
proposal_boxes: [batch_size, self.max_num_proposals, 4] representing
decoded proposal bounding boxes... | def _batch_decode_boxes(self, box_encodings, anchor_boxes):
| 'Decodes box encodings with respect to the anchor boxes.\n\n Args:\n box_encodings: a 4-D tensor with shape\n [batch_size, num_anchors, num_classes, self._box_coder.code_size]\n ... |
'Compute scalar loss tensors given prediction tensors.
If first_stage_only=True, only RPN related losses are computed (i.e.,
`rpn_localization_loss` and `rpn_objectness_loss`). Otherwise all
losses are computed.
Args:
prediction_dict: a dictionary holding prediction tensors (see the
documentation for the predict metho... | def loss(self, prediction_dict, scope=None):
| with tf.name_scope(scope, 'Loss', prediction_dict.values()):
(groundtruth_boxlists, groundtruth_classes_with_background_list) = self._format_groundtruth_data(prediction_dict['image_shape'])
loss_dict = self._loss_rpn(prediction_dict['rpn_box_encodings'], prediction_dict['rpn_objectness_predictions_w... |
'Computes scalar RPN loss tensors.
Uses self._proposal_target_assigner to obtain regression and classification
targets for the first stage RPN, samples a "minibatch" of anchors to
participate in the loss computation, and returns the RPN losses.
Args:
rpn_box_encodings: A 4-D float tensor of shape
[batch_size, num_ancho... | def _loss_rpn(self, rpn_box_encodings, rpn_objectness_predictions_with_background, anchors, groundtruth_boxlists, groundtruth_classes_with_background_list):
| with tf.name_scope('RPNLoss'):
(batch_cls_targets, batch_cls_weights, batch_reg_targets, batch_reg_weights, _) = target_assigner.batch_assign_targets(self._proposal_target_assigner, box_list.BoxList(anchors), groundtruth_boxlists, (len(groundtruth_boxlists) * [None]))
batch_cls_targets = tf.squeeze(... |
'Computes scalar box classifier loss tensors.
Uses self._detector_target_assigner to obtain regression and classification
targets for the second stage box classifier, optionally performs
hard mining, and returns losses. All losses are computed independently
for each image and then averaged across the batch.
This funct... | def _loss_box_classifier(self, refined_box_encodings, class_predictions_with_background, proposal_boxes, num_proposals, groundtruth_boxlists, groundtruth_classes_with_background_list):
| with tf.name_scope('BoxClassifierLoss'):
paddings_indicator = self._padded_batched_proposals_indicator(num_proposals, self.max_num_proposals)
proposal_boxlists = [box_list.BoxList(proposal_boxes_single_image) for proposal_boxes_single_image in tf.unstack(proposal_boxes)]
batch_size = len(pro... |
'Creates indicator matrix of non-pad elements of padded batch proposals.
Args:
num_proposals: Tensor of type tf.int32 with shape [batch_size].
max_num_proposals: Maximum number of proposals per image (integer).
Returns:
A Tensor of type tf.bool with shape [batch_size, max_num_proposals].'
def _padded_batched_proposals_indicator(self, num_proposals, max_num_proposals):
    """Creates indicator matrix of non-pad elements of padded batch proposals.

    Args:
      num_proposals: Tensor of type tf.int32 with shape [batch_size].
      max_num_proposals: Maximum number of proposals per image (integer).

    Returns:
      A Tensor of type tf.bool with shape [batch_size, max_num_proposals];
      entry [i, j] is True iff j < num_proposals[i].
    """
    batch_size = tf.size(num_proposals)
    # Broadcast per-image proposal counts and column positions to a
    # [batch_size, max_num_proposals] grid, then compare.
    counts_grid = tf.tile(tf.expand_dims(num_proposals, 1),
                          [1, max_num_proposals])
    positions_grid = tf.tile(tf.expand_dims(tf.range(max_num_proposals), 0),
                             [batch_size, 1])
    return tf.greater(counts_grid, positions_grid)
|
'Unpads proposals and applies hard mining.
Args:
proposal_boxlists: A list of `batch_size` BoxLists each representing
`self.max_num_proposals` representing decoded proposal bounding boxes
for each image.
second_stage_loc_losses: A Tensor of type `float32`. A tensor of shape
`[batch_size, self.max_num_proposals]` repres... | def _unpad_proposals_and_apply_hard_mining(self, proposal_boxlists, second_stage_loc_losses, second_stage_cls_losses, num_proposals):
| for (proposal_boxlist, single_image_loc_loss, single_image_cls_loss, single_image_num_proposals) in zip(proposal_boxlists, tf.unstack(second_stage_loc_losses), tf.unstack(second_stage_cls_losses), tf.unstack(num_proposals)):
proposal_boxlist = box_list.BoxList(tf.slice(proposal_boxlist.get(), [0, 0], [singl... |
'Returns a map of variables to load from a foreign checkpoint.
See parent class for details.
Args:
from_detection_checkpoint: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Returns:
A dict mapping ... | def restore_map(self, from_detection_checkpoint=True):
| if (not from_detection_checkpoint):
return self._feature_extractor.restore_from_classification_checkpoint_fn(self.first_stage_feature_extractor_scope, self.second_stage_feature_extractor_scope)
variables_to_restore = tf.global_variables()
variables_to_restore.append(slim.get_or_create_global_step())... |
'Preprocesses images for feature extraction (minus image resizing).
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.'
@abstractmethod
def preprocess(self, resized_inputs):
    """Preprocesses images for feature extraction (minus image resizing).

    Args:
      resized_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.
    """
    pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.