text
stringlengths 0
828
|
|---|
features.append(read[negative_indices.popleft() - negative_count, :])
|
labels.append(-1)
|
negative_count += size
|
hdf5.cd("".."")
|
# return features and labels
|
return numpy.array(features), numpy.array(labels)
|
else:
|
positive_count -= len(self.positive_indices)
|
negative_count -= len(self.negative_indices)
|
logger.info(""Getting worst %d of %d positive and worst %d of %d negative examples"", min(maximum_number_of_positives, positive_count), positive_count, min(maximum_number_of_negatives, negative_count), negative_count)
|
# compute the worst features based on the current model
|
worst_positives, worst_negatives = [], []
|
positive_count, negative_count = 0, 0
|
for feature_file in feature_files:
|
hdf5 = bob.io.base.HDF5File(feature_file)
|
for image in sorted(hdf5.sub_groups(recursive=False, relative=True)):
|
hdf5.cd(image)
|
for scale in sorted(hdf5.keys(relative=True)):
|
read = hdf5.get(scale)
|
size = read.shape[0]
|
prediction = bob.blitz.array((size,), numpy.float64)
|
# forward features through the model
|
result = model.forward(read, prediction)
|
if scale.startswith(""Positives""):
|
indices = [i for i in range(size) if positive_count + i not in self.positive_indices]
|
worst_positives.extend([(prediction[i], positive_count + i, read[i]) for i in indices if prediction[i] <= 0])
|
positive_count += size
|
else:
|
indices = [i for i in range(size) if negative_count + i not in self.negative_indices]
|
worst_negatives.extend([(prediction[i], negative_count + i, read[i]) for i in indices if prediction[i] >= 0])
|
negative_count += size
|
hdf5.cd("".."")
|
# cut off good results
|
if maximum_number_of_positives is not None and len(worst_positives) > maximum_number_of_positives:
|
# keep only the positives with the low predictions (i.e., the worst)
|
worst_positives = sorted(worst_positives, key=lambda k: k[0])[:maximum_number_of_positives]
|
if maximum_number_of_negatives is not None and len(worst_negatives) > maximum_number_of_negatives:
|
# keep only the negatives with the high predictions (i.e., the worst)
|
worst_negatives = sorted(worst_negatives, reverse=True, key=lambda k: k[0])[:maximum_number_of_negatives]
|
# mark all indices to be used
|
self.positive_indices |= set(k[1] for k in worst_positives)
|
self.negative_indices |= set(k[1] for k in worst_negatives)
|
# finally, collect features and labels
|
return numpy.array([f[2] for f in worst_positives] + [f[2] for f in worst_negatives]), numpy.array([1]*len(worst_positives) + [-1]*len(worst_negatives))"
|
def feature_extractor(self):
    """feature_extractor() -> extractor

    Returns the feature extractor used to extract the positive and negative features.

    This feature extractor is stored to file during the :py:meth:`extract` method run,
    so this function reads that file (from the ``feature_directory`` set in the
    constructor) and returns its content.

    **Returns:**

    ``extractor`` : :py:class:`FeatureExtractor`
      The feature extractor used to extract the features stored in the ``feature_directory``

    **Raises:**

    ``IOError``
      If no ``Extractor.hdf5`` file exists inside the ``feature_directory``.
    """
    extractor_file = os.path.join(self.feature_directory, "Extractor.hdf5")
    if not os.path.exists(extractor_file):
        # fixed message grammar: "Could not found" -> "Could not find"
        raise IOError("Could not find extractor file %s. Did you already run the extraction process? Did you specify the correct `feature_directory` in the constructor?" % extractor_file)
    hdf5 = bob.io.base.HDF5File(extractor_file)
    return FeatureExtractor(hdf5)
|
def get(self, param, default=EMPTY):
    """Return the value of *param*, specialized against the global context.

    If the parameter does not exist, *default* is returned; if no default was
    given (i.e. it is the ``EMPTY`` sentinel), a :class:`ParamNotFoundException`
    is raised instead.

    The returned value is specialized by repeatedly applying ``%``-interpolation
    with the context dictionary until doing so no longer changes the value (or a
    small recursion limit is reached).
    """
    if not self.has(param):
        if default is not EMPTY:
            return default
        raise ParamNotFoundException("value for %s not found" % param)

    # Build the interpolation context: the global context plus this feature's
    # own raw values, namespaced as "<feature_name>:<key>".
    context_dict = copy.deepcopy(self.manifest.get_context_dict())
    for k, v in self.raw_dict.items():
        context_dict["%s:%s" % (self.feature_name, k)] = v

    cur_value = self.raw_dict[param]
    prev_value = None
    max_depth = 5
    # apply the context until doing so does not change the value.
    # FIX: max_depth was never decremented before, so the recursion guard
    # had no effect; it now bounds the number of interpolation passes.
    while cur_value != prev_value and max_depth > 0:
        max_depth -= 1
        prev_value = cur_value
        try:
            cur_value = str(prev_value) % context_dict
        except KeyError as e:  # modernized from sys.exc_info()[1]
            key = e.args[0]
            if key.startswith('config:'):
                missing_key = key.split(':')[1]
                if self.manifest.inputs.is_input(missing_key):
                    # an input value was referenced but not yet in the context:
                    # add it and force another pass to retry the interpolation.
                    # FIX: previously cur_value == prev_value here, so the loop
                    # exited without ever using the newly-fetched input.
                    val = self.manifest.inputs.get_input(missing_key)
                    context_dict[key] = val
                    prev_value = None
            else:
                # logger.warn is deprecated; warning() is the documented name
                logger.warning("Could not specialize %s! Error: %s" % (self.raw_dict[param], e))
                return self.raw_dict[param]
    # NOTE(review): the final return was cut off in the extracted source; the
    # docstring's contract ("the returned parameter will have been specialized")
    # implies the fully specialized value is returned here — confirm upstream.
    return cur_value
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.