``similarity_thresholds`` : (float, float)
  The two Jaccard similarity thresholds: patch locations with a similarity below the lower threshold are considered negative examples, while locations with a similarity above the upper threshold are considered positive examples.

``parallel`` : int or ``None``
  If given, the total number of parallel processes used to extract features (the index of the current process is read from the ``SGE_TASK_ID`` environment variable).

``mirror`` : bool
  Should positive and negative samples also be extracted from horizontally mirrored images?

``use_every_nth_negative_scale`` : int
  Skip some negative scales to decrease the number of negative examples, i.e., only extract and store negative features when ``scale_counter % use_every_nth_negative_scale == 0``.

  .. note::
    The ``scale_counter`` is not reset between images, so features from different scales may be collected in subsequent images.
"""
feature_file = self._feature_file(parallel)
bob.io.base.create_directories_safe(self.feature_directory)

if parallel is None or "SGE_TASK_ID" not in os.environ or os.environ["SGE_TASK_ID"] == '1':
  extractor_file = os.path.join(self.feature_directory, "Extractor.hdf5")
  hdf5 = bob.io.base.HDF5File(extractor_file, "w")
  feature_extractor.save(hdf5)
  del hdf5

total_positives, total_negatives = 0, 0
indices = parallel_part(range(len(self)), parallel)
if not indices:
  logger.warning("The index range for the current parallel thread is empty.")
else:
  logger.info("Extracting features for images in range %d - %d of %d", indices[0], indices[-1], len(self))

hdf5 = bob.io.base.HDF5File(feature_file, "w")
for index in indices:
  hdf5.create_group("Image-%d" % index)
  hdf5.cd("Image-%d" % index)
  logger.debug("Processing file %d of %d: %s", index + 1, indices[-1] + 1, self.image_paths[index])
  # load the image and convert it to gray scale if necessary
  image = bob.io.base.load(self.image_paths[index])
  if image.ndim == 3:
    image = bob.ip.color.rgb_to_gray(image)

  # get the ground-truth bounding boxes
  ground_truth = self.bounding_boxes[index]

  # collect images and ground truth for the original and the mirrored image
  images = [image] if not mirror else [image, bob.ip.base.flop(image)]
  ground_truths = [ground_truth] if not mirror else [ground_truth, [gt.mirror_x(image.shape[1]) for gt in ground_truth]]
  parts = "om"
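  # the part tag "o" marks features from the original image and "m" those from the
  # mirrored image; it becomes part of the HDF5 dataset names written below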
  # now, sample
  scale_counter = -1
  for image, ground_truth, part in zip(images, ground_truths, parts):
    for scale, scaled_image_shape in sampler.scales(image):
      scale_counter += 1
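      # scale the ground-truth boxes to the current pyramid level, so that their overlap
      # with the sampled patches is measured in the same coordinate system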
      scaled_gt = [gt.scale(scale) for gt in ground_truth]
      positives = []
      negatives = []
      # iterate over all possible positions in the image
      for bb in sampler.sample_scaled(scaled_image_shape):
        # check if the patch is a positive example
        positive = False
        negative = True
        for gt in scaled_gt:
          similarity = bb.similarity(gt)
          if similarity > similarity_thresholds[1]:
            positive = True
            break
          if similarity > similarity_thresholds[0]:
            negative = False
            break
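        # patches whose similarity falls between the two thresholds are neither clearly
        # positive nor clearly negative and are therefore skipped entirely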
        if positive:
          positives.append(bb)
        elif negative and scale_counter % use_every_nth_negative_scale == 0:
          negatives.append(bb)

      # per scale, limit the number of positive and negative samples
      positives = [positives[i] for i in quasi_random_indices(len(positives), number_of_examples_per_scale[0])]
      negatives = [negatives[i] for i in quasi_random_indices(len(negatives), number_of_examples_per_scale[1])]

      # extract features
      feature_extractor.prepare(image, scale)
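      # prepare() is assumed to rescale the image (and any internal representation the
      # extractor keeps) once per scale, so that extracting individual patches below is cheap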
      # .. negative features
      if negatives:
        negative_features = numpy.zeros((len(negatives), feature_extractor.number_of_features), numpy.uint16)
        for i, bb in enumerate(negatives):
          feature_extractor.extract_all(bb, negative_features, i)
        hdf5.set("Negatives-%s-%.5f" % (part, scale), negative_features)
        total_negatives += len(negatives)

      # .. positive features
      if positives:
        positive_features = numpy.zeros((len(positives), feature_extractor.number_of_features), numpy.uint16)
        for i, bb in enumerate(positives):
          feature_extractor.extract_all(bb, positive_features, i)
        hdf5.set("Positives-%s-%.5f" % (part, scale), positive_features)
        total_positives += len(positives)
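
# Illustration only (not part of the extraction code above): a minimal, self-contained
# sketch of the Jaccard-based labeling that is applied to each sampled patch.  Boxes are
# given as (top, left, bottom, right) tuples and all names and threshold values here are
# hypothetical; the real code uses the sampler's bounding boxes and ``bb.similarity(gt)``.
def jaccard(a, b):
  """Jaccard overlap (intersection over union) of two axis-aligned boxes."""
  top, left = max(a[0], b[0]), max(a[1], b[1])
  bottom, right = min(a[2], b[2]), min(a[3], b[3])
  inter = max(0, bottom - top) * max(0, right - left)
  area_a = (a[2] - a[0]) * (a[3] - a[1])
  area_b = (b[2] - b[0]) * (b[3] - b[1])
  return inter / float(area_a + area_b - inter)

def label_patch(patch, ground_truths, thresholds=(0.3, 0.7)):
  """Returns 'positive', 'negative' or 'ignore', following the early-exit loop above."""
  for gt in ground_truths:
    similarity = jaccard(patch, gt)
    if similarity > thresholds[1]:
      return 'positive'
    if similarity > thresholds[0]:
      # overlap is in between the thresholds: the patch is discarded
      return 'ignore'
  return 'negative'

# e.g., a patch that only partially overlaps the ground truth is ignored:
# label_patch((0, 0, 64, 64), [(20, 20, 84, 84)])  -> 'ignore'  (overlap of about 0.31)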