_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def get_id(self):
    """Return a unique hash identifying this alignment.

    The id is derived from the title, the best score and the hit
    definition, concatenated as strings.
    """
    components = (self.title, self.best_score(), self.hit_def)
    return hash(''.join(str(part) for part in components))
"resource": ""
} |
def render_template(template, **context):
    """Render the named template with the given context.

    :param template: The template name (slash-separated path).
    :param context: the variables that should be available in the
                    context of the template.
    """
    parts = template.split('/')
    directory, leaf = parts[:-1], parts[-1]
    renderer = _get_renderer(directory)
    loaded = renderer.load_template(leaf)
    return renderer.render(loaded, context)
"resource": ""
} |
def open(pattern, read_only=False):
    """
    Return a root descriptor to work with one or multiple NetCDF files.

    Keyword arguments:
    pattern -- a list of filenames or a string pattern.
    read_only -- open the files without write access (default False).

    Returns a (root, is_new) tuple.
    """
    descriptor = NCObject.open(pattern, read_only=read_only)
    return descriptor, descriptor.is_new
"resource": ""
} |
def getvar(root, name, vtype='', dimensions=(), digits=0, fill_value=None,
           source=None):
    """
    Return a variable from a NCFile or NCPackage instance. If the variable
    doesn't exists create it.

    Keyword arguments:
    root -- the root descriptor returned by the 'open' function
    name -- the name of the variable
    vtype -- the type of each value, ex ['f4', 'i4', 'i1', 'S1'] (default '')
    dimensions -- the tuple with dimensions name of the variables (default ())
    digits -- the precision required when using a 'f4' vtype (default 0)
    fill_value -- the initial value used in the creation time (default None)
    source -- the source variable to be copied (default None)
    """
    # Delegate the lookup/creation entirely to the root descriptor.
    args = (name, vtype, dimensions, digits, fill_value, source)
    return root.getvar(*args)
"resource": ""
} |
def loader(pattern, dimensions=None, distributed_dim='time', read_only=False):
    """
    It provide a root descriptor to be used inside a with statement. It
    automatically close the root when the with statement finish, even when
    the body of the with statement raises an exception.

    Keyword arguments:
    pattern -- a list of filenames or a string pattern
    dimensions -- if given, open through `tailor` restricted to these
        dimensions (default None)
    distributed_dim -- dimension distributed across the files (default 'time')
    read_only -- open the files without write access (default False)
    """
    if dimensions:
        root = tailor(pattern, dimensions, distributed_dim, read_only=read_only)
    else:
        root, _ = open(pattern, read_only=read_only)
    try:
        yield root
    finally:
        # bug fix: previously close() was skipped whenever the caller's
        # `with` body raised, leaking the open descriptor
        root.close()
"resource": ""
} |
def dict_copy(func):
    """Decorator: pass shallow copies of any dict arguments to *func*.

    Prevents the wrapped function from mutating the caller's dictionaries.
    Non-dict arguments (positional or keyword) are passed through unchanged.
    """
    from functools import wraps

    @wraps(func)  # preserve func's name/docstring on the returned proxy
    def proxy(*args, **kwargs):
        new_args = [dict(arg) if isinstance(arg, dict) else arg
                    for arg in args]
        new_kwargs = {key: dict(val) if isinstance(val, dict) else val
                      for key, val in kwargs.items()}
        return func(*new_args, **new_kwargs)
    return proxy
"resource": ""
} |
def is_listish(obj):
    """Check if something quacks like a list (list/tuple/set, or any
    other object accepted by is_sequence)."""
    return True if isinstance(obj, (list, tuple, set)) else is_sequence(obj)
"resource": ""
} |
def unique_list(lst):
    """Make a list unique, retaining order of initial appearance."""
    result = []
    for element in lst:
        # membership test on a list keeps support for unhashable items
        if element in result:
            continue
        result.append(element)
    return result
"resource": ""
} |
def check_compatibility(datasets, reqd_num_features=None):
    """
    Checks whether the given MLdataset instances are compatible
    i.e. with same set of subjects, each beloning to the same class in all instances.
    Checks the first dataset in the list against the rest, and returns a boolean array.

    Parameters
    ----------
    datasets : Iterable
        A list of n datasets
    reqd_num_features : int
        The required number of features in each dataset.
        Helpful to ensure test sets are compatible with training set,
        as well as within themselves.

    Returns
    -------
    all_are_compatible : bool
        Boolean flag indicating whether all datasets are compatible or not
    compatibility : list
        List indicating whether first dataset is compatible with the rest individually.
        This could be useful to select a subset of mutually compatible datasets.
        Length : n-1
    dim_mismatch : bool
        Boolean flag indicating mismatch in dimensionality from that specified
    size_descriptor : tuple
        A tuple with values for (num_samples, reqd_num_features)
        - num_samples must be common for all datasets that are evaluated for compatibility
        - reqd_num_features is None (when no check on dimensionality is perfomed), or
        list of corresponding dimensionalities for each input dataset
    """
    # bug fix: `collections.Iterable` was removed in Python 3.10;
    # the ABC has lived in collections.abc since Python 3.3
    try:
        from collections.abc import Iterable
    except ImportError:  # legacy interpreters
        from collections import Iterable
    if not isinstance(datasets, Iterable):
        raise TypeError('Input must be an iterable '
                        'i.e. (list/tuple) of MLdataset/similar instances')
    datasets = list(datasets)  # to make it indexable if coming from a set
    num_datasets = len(datasets)
    check_dimensionality = False
    dim_mismatch = False
    if reqd_num_features is not None:
        if isinstance(reqd_num_features, Iterable):
            if len(reqd_num_features) != num_datasets:
                raise ValueError('Specify dimensionality for exactly {} datasets.'
                                 ' Given for a different number {}'
                                 ''.format(num_datasets, len(reqd_num_features)))
            reqd_num_features = list(map(int, reqd_num_features))
        else:  # same dimensionality for all
            reqd_num_features = [int(reqd_num_features)] * num_datasets
        check_dimensionality = True
    else:
        # to enable iteration
        reqd_num_features = [None, ] * num_datasets
    pivot = datasets[0]
    if not isinstance(pivot, MLDataset):
        pivot = MLDataset(pivot)
    if check_dimensionality and pivot.num_features != reqd_num_features[0]:
        warnings.warn('Dimensionality mismatch! Expected {} whereas current {}.'
                      ''.format(reqd_num_features[0], pivot.num_features))
        dim_mismatch = True
    compatible = list()
    for ds, reqd_dim in zip(datasets[1:], reqd_num_features[1:]):
        if not isinstance(ds, MLDataset):
            ds = MLDataset(ds)
        is_compatible = True
        # compound bool will short-circuit, not optim required
        if pivot.num_samples != ds.num_samples \
                or pivot.keys != ds.keys \
                or pivot.classes != ds.classes:
            is_compatible = False
        if check_dimensionality and reqd_dim != ds.num_features:
            warnings.warn('Dimensionality mismatch! Expected {} whereas current {}.'
                          ''.format(reqd_dim, ds.num_features))
            dim_mismatch = True
        compatible.append(is_compatible)
    return all(compatible), compatible, dim_mismatch, \
        (pivot.num_samples, reqd_num_features)
"resource": ""
} |
def print_info(ds, ds_path=None):
    """Prints a basic summary of the given dataset, underlining the file
    basename when a path is supplied."""
    bname = basename(ds_path) if ds_path is not None else ''
    underline = '-' * len(bname)
    print('\n{}\n{}\n{:full}'.format(underline, bname, ds))
    return
return | python | {
"resource": ""
} |
def print_meta(ds, ds_path=None):
    """Prints one `subject,class` line per sample of the given dataset."""
    header = '\n#' + ds_path
    print(header)
    lines = ('{},{}'.format(sid, cls) for sid, cls in ds.classes.items())
    for line in lines:
        print(line)
    return
"resource": ""
} |
def combine_and_save(add_path_list, out_path):
    """
    Combines whatever datasets that can be combined,
    and save the bigger dataset to a given location.

    Parameters
    ----------
    add_path_list : iterable
        Paths to the MLDataset files to merge, in order.
    out_path : str
        Where the combined dataset is saved.
    """
    add_path_list = list(add_path_list)
    # first one!
    first_ds_path = add_path_list[0]
    print('Starting with {}'.format(first_ds_path))
    combined = MLDataset(first_ds_path)
    for ds_path in add_path_list[1:]:
        try:
            combined = combined + MLDataset(ds_path)
        except Exception:
            # bug fix: narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit still propagate; incompatible datasets are skipped
            print(' Failed to add {}'.format(ds_path))
            traceback.print_exc()
        else:
            print('Successfully added {}'.format(ds_path))
    combined.save(out_path)
    return
"resource": ""
} |
def get_parser():
    """Builds the command-line argument parser for pyradigm."""
    cli = argparse.ArgumentParser(prog='pyradigm')
    cli.add_argument('path_list', nargs='*', action='store', default=None,
                     help='List of paths to display info about.')
    cli.add_argument('-m', '--meta', action='store_true',
                     dest='meta_requested', required=False, default=False,
                     help='Prints the meta data (subject_id,class).')
    cli.add_argument('-i', '--info', action='store_true',
                     dest='summary_requested', required=False, default=False,
                     help='Prints summary info (classes, #samples, #features).')
    multi_ds = cli.add_argument_group('Options for multiple datasets')
    multi_ds.add_argument('-a', '--add', nargs='+', action='store',
                          dest='add_path_list', required=False, default=None,
                          help='List of MLDatasets to combine')
    multi_ds.add_argument('-o', '--out_path', action='store', dest='out_path',
                          required=False, default=None,
                          help='Output path to save the resulting dataset.')
    return cli
"resource": ""
} |
def parse_args():
    """Parses and validates the command-line arguments.

    Returns
    -------
    tuple
        (path_list, meta_requested, summary_requested,
         add_path_list, out_path)

    Raises
    ------
    ValueError
        If arguments can not be parsed, if an output path is missing for a
        combine operation, or if fewer than two datasets are given to combine.
    """
    parser = get_parser()
    if len(sys.argv) < 2:
        parser.print_help()
        logging.warning('Too few arguments!')
        parser.exit(1)
    # parsing
    try:
        params = parser.parse_args()
    except Exception as exc:
        print(exc)
        raise ValueError('Unable to parse command-line arguments.')
    path_list = list()
    if params.path_list is not None:
        for dpath in params.path_list:
            if pexists(dpath):
                path_list.append(realpath(dpath))
            else:
                print('Below dataset does not exist. Ignoring it.\n{}'.format(dpath))
    add_path_list = list()
    out_path = None
    if params.add_path_list is not None:
        for dpath in params.add_path_list:
            if pexists(dpath):
                add_path_list.append(realpath(dpath))
            else:
                print('Below dataset does not exist. Ignoring it.\n{}'.format(dpath))
        if params.out_path is None:
            raise ValueError(
                'Output path must be specified to save the combined dataset to')
        out_path = realpath(params.out_path)
        parent_dir = dirname(out_path)
        if not pexists(parent_dir):
            # bug fix: os.mkdir fails when intermediate directories are also
            # missing; makedirs creates the whole chain
            os.makedirs(parent_dir)
        if len(add_path_list) < 2:
            raise ValueError('Need a minimum of datasets to combine!!')
    # removing duplicates (from regex etc)
    path_list = set(path_list)
    add_path_list = set(add_path_list)
    return path_list, params.meta_requested, params.summary_requested, \
        add_path_list, out_path
"resource": ""
} |
def data_and_labels(self):
    """
    Dataset features and labels in a matrix form for learning.
    Also returns sample_ids in the same order.

    Returns
    -------
    data_matrix : ndarray
        2D array of shape [num_samples, num_features]
        with features corresponding row-wise to sample_ids
    labels : ndarray
        Array of numeric labels for each sample corresponding row-wise to sample_ids
    sample_ids : list
        List of sample ids
    """
    ids = np.array(self.keys)
    label_map = self.labels
    num_rows = self.num_samples
    # NaN-initialized so any unfilled row is detectable downstream
    matrix = np.full((num_rows, self.num_features), np.nan)
    labels = np.full((num_rows, 1), np.nan)
    for row, sid in enumerate(ids):
        matrix[row, :] = self.__data[sid]
        labels[row] = label_map[sid]
    return matrix, np.ravel(labels), ids
"resource": ""
} |
def classes(self, values):
    """Classes setter: validates and replaces the sample-to-class mapping."""
    if not isinstance(values, dict):
        raise ValueError('classes input must be a dictionary!')
    if self.__data is not None and len(self.__data) != len(values):
        raise ValueError(
            'number of samples do not match the previously assigned data')
    elif set(self.keys) != set(list(values)):
        raise ValueError('sample ids do not match the previously assigned ids.')
    else:
        self.__classes = values
"resource": ""
} |
def feature_names(self, names):
    """Stores the text labels for the features, after validating count and type."""
    expected = self.num_features
    if len(names) != expected:
        raise ValueError("Number of names do not match the number of features!")
    acceptable_types = (Sequence, np.ndarray, np.generic)
    if not isinstance(names, acceptable_types):
        raise ValueError("Input is not a sequence. "
                         "Ensure names are in the same order "
                         "and length as features.")
    self.__feature_names = np.array(names)
"resource": ""
} |
def glance(self, nitems=5):
    """Quick and partial glance of the data matrix.

    Parameters
    ----------
    nitems : int
        Number of items to glance from the dataset.
        Default : 5

    Returns
    -------
    dict
    """
    # clamp the request to the range [1, num_samples - 1]
    upper = min(nitems, self.num_samples - 1)
    clamped = max(1, upper)
    return self.__take(clamped, iter(self.__data.items()))
"resource": ""
} |
def check_features(self, features):
    """
    Ensures incoming data is a non-empty, flattened numpy array.

    Parameters
    ----------
    features : iterable
        Any data that can be converted to a numpy array.

    Returns
    -------
    features : numpy array
        Flattened non-empty numpy array.

    Raises
    ------
    ValueError
        If input data is empty.
    """
    array = features if isinstance(features, np.ndarray) else np.asarray(features)
    if array.size <= 0:
        raise ValueError('provided features are empty.')
    return np.ravel(array) if array.ndim > 1 else array
"resource": ""
} |
def add_sample(self, sample_id, features, label,
               class_id=None,
               overwrite=False,
               feature_names=None):
    """Adds a new sample to the dataset with its features, label and class ID.
    This is the preferred way to construct the dataset.
    Parameters
    ----------
    sample_id : str, int
        The identifier that uniquely identifies this sample.
    features : list, ndarray
        The features for this sample
    label : int, str
        The label for this sample
    class_id : int, str
        The class for this sample.
        If not provided, label converted to a string becomes its ID.
    overwrite : bool
        If True, allows the overwite of features for an existing subject ID.
        Default : False.
    feature_names : list
        The names for each feature. Assumed to be in the same order as `features`
    Raises
    ------
    ValueError
        If `sample_id` is already in the MLDataset (and overwrite=False), or
        If dimensionality of the current sample does not match the current, or
        If `feature_names` do not match existing names
    TypeError
        If sample to be added is of different data type compared to existing samples.
    """
    if sample_id in self.__data and not overwrite:
        raise ValueError('{} already exists in this dataset!'.format(sample_id))
    # ensuring there is always a class name, even when not provided by the user.
    # this is needed, in order for __str__ method to work.
    # TODO consider enforcing label to be numeric and class_id to be string
    #  so portability with other packages is more uniform e.g. for use in scikit-learn
    if class_id is None:
        class_id = str(label)
    # check_features rejects empty input and flattens to 1D
    features = self.check_features(features)
    if self.num_samples <= 0:
        # very first sample: establishes dtype, dimensionality and
        # (optionally) the default string feature names
        self.__data[sample_id] = features
        self.__labels[sample_id] = label
        self.__classes[sample_id] = class_id
        self.__dtype = type(features)
        # NOTE(review): check_features appears to always return an ndarray,
        # so the len(features) fallback looks unreachable -- confirm
        self.__num_features = features.size if isinstance(features,
                                                          np.ndarray) else len(
            features)
        if feature_names is None:
            self.__feature_names = self.__str_names(self.num_features)
    else:
        # subsequent samples must match the established dimensionality and dtype
        if self.__num_features != features.size:
            raise ValueError('dimensionality of this sample ({}) '
                             'does not match existing samples ({})'
                             ''.format(features.size, self.__num_features))
        if not isinstance(features, self.__dtype):
            raise TypeError("Mismatched dtype. Provide {}".format(self.__dtype))
        self.__data[sample_id] = features
        self.__labels[sample_id] = label
        self.__classes[sample_id] = class_id
    if feature_names is not None:
        # if it was never set, allow it
        # class gets here when adding the first sample,
        # after dataset was initialized with empty constructor
        if self.__feature_names is None:
            self.__feature_names = np.array(feature_names)
        else:  # if set already, ensure a match
            if not np.array_equal(self.feature_names, np.array(feature_names)):
                raise ValueError(
                    "supplied feature names do not match the existing names!")
"resource": ""
} |
def del_sample(self, sample_id):
    """
    Removes a sample (and its class/label entries) from the dataset.

    Parameters
    ----------
    sample_id : str
        sample id to be removed.

    Raises
    ------
    UserWarning
        If sample id to delete was not found in the dataset.
    """
    if sample_id not in self.__data:
        warnings.warn('Sample to delete not found in the dataset - nothing to do.')
        return
    self.__data.pop(sample_id)
    self.__classes.pop(sample_id)
    self.__labels.pop(sample_id)
    print('{} removed.'.format(sample_id))
"resource": ""
} |
def get_feature_subset(self, subset_idx):
    """
    Returns the subset of features indexed numerically.

    Parameters
    ----------
    subset_idx : list, ndarray
        List of indices to features to be returned

    Returns
    -------
    MLDataset : MLDataset
        with subset of features requested.

    Raises
    ------
    UnboundLocalError
        If input indices are out of bounds for the dataset.
    """
    subset_idx = np.asarray(subset_idx)
    # bug fix: the original condition parsed as
    #   (not (max < n)) and (min >= 0)
    # due to operator precedence, so negative indices slipped through and
    # an out-of-range max was ignored when min was negative.
    if not (max(subset_idx) < self.__num_features and min(subset_idx) >= 0):
        raise UnboundLocalError('indices out of range for the dataset. '
                                'Max index: {} Min index : 0'.format(
                                    self.__num_features))
    sub_data = {sample: features[subset_idx] for sample, features in
                self.__data.items()}
    new_descr = 'Subset features derived from: \n ' + self.__description
    subdataset = MLDataset(data=sub_data,
                           labels=self.__labels, classes=self.__classes,
                           description=new_descr,
                           feature_names=self.__feature_names[subset_idx])
    return subdataset
"resource": ""
} |
def keys_with_value(dictionary, value):
    "Returns a subset of keys from the dict with the value supplied."
    return [key for key, val in dictionary.items() if val == value]
"resource": ""
} |
def get_class(self, class_id):
    """
    Returns a smaller dataset belonging to the requested classes.

    Parameters
    ----------
    class_id : str or list
        identifier(s) of the class(es) to be returned.

    Returns
    -------
    MLDataset
        With subset of samples belonging to the given class(es).

    Raises
    ------
    ValueError
        If one or more of the requested classes do not exist in this dataset.
        If the specified id is empty or None
    """
    if class_id in [None, '']:
        raise ValueError("class id can not be empty or None.")
    if isinstance(class_id, str):
        class_ids = [class_id, ]
    else:
        class_ids = class_id
    # bug fix: the original computed the *intersection* with the existing
    # classes and raised when it was empty, so a mix of valid and invalid
    # ids slipped through silently. Compute the truly missing ids instead.
    non_existent = set(class_ids).difference(self.class_set)
    if len(non_existent) > 0:
        raise ValueError(
            'These classes {} do not exist in this dataset.'.format(non_existent))
    subsets = list()
    for class_id in class_ids:
        subsets_this_class = self.keys_with_value(self.__classes, class_id)
        subsets.extend(subsets_this_class)
    return self.get_subset(subsets)
"resource": ""
} |
def transform(self, func, func_description=None):
    """
    Applies a given a function to the features of each subject
    and returns a new dataset with other info unchanged.

    Parameters
    ----------
    func : callable
        A valid callable that takes in a single ndarray and returns a single ndarray.
        Ensure the transformed dimensionality must be the same for all subjects.
        If your function requires more than one argument,
        use `functools.partial` to freeze all the arguments
        except the features for the subject.
    func_description : str, optional
        Human readable description of the given function.

    Returns
    -------
    xfm_ds : MLDataset
        with features obtained from subject-wise transform

    Raises
    ------
    TypeError
        If given func is not a callable
    ValueError
        If transformation of any of the subjects features raises an exception.
    """
    if not callable(func):
        raise TypeError('Given function {} is not a callable'.format(func))
    xfm_ds = MLDataset()
    for sample_id, feat in self.__data.items():
        try:
            transformed = func(feat)
        except:
            print('Unable to transform features for {}. Quitting.'.format(sample_id))
            raise
        xfm_ds.add_sample(sample_id, transformed,
                          label=self.__labels[sample_id],
                          class_id=self.__classes[sample_id])
    xfm_ds.description = "{}\n{}".format(func_description, self.__description)
    return xfm_ds
"resource": ""
} |
def random_subset_ids_by_count(self, count_per_class=1):
    """
    Returns a random subset of sample ids of specified size by count,
    within each class.

    Parameters
    ----------
    count_per_class : int
        Exact number of samples per each class.

    Returns
    -------
    subset : list
        Combined list of sample ids from all classes.
    """
    class_sizes = self.class_sizes
    subsets = list()
    if count_per_class < 1:
        warnings.warn('Atleast one sample must be selected from each class')
        return list()
    elif count_per_class >= self.num_samples:
        warnings.warn('All samples requested - returning a copy!')
        return self.keys
    for class_id, class_size in class_sizes.items():
        # samples belonging to the class
        this_class = self.keys_with_value(self.classes, class_id)
        # shuffling the sample order; shuffling works in-place!
        random.shuffle(this_class)
        # clipping the range to [0, class_size]
        subset_size_this_class = max(0, min(class_size, count_per_class))
        if subset_size_this_class < 1:
            # warning if none were selected
            warnings.warn('No subjects from class {} were selected.'.format(class_id))
        else:
            # bug fix: slice by the clipped size (the original computed it
            # but sliced by the raw count_per_class instead)
            subsets.extend(this_class[0:subset_size_this_class])
    if len(subsets) > 0:
        return subsets
    warnings.warn('Zero samples were selected. Returning an empty list!')
    return list()
"resource": ""
} |
def sample_ids_in_class(self, class_id):
    """
    Returns a list of sample ids belonging to a given class.

    Parameters
    ----------
    class_id : str
        class id to query.

    Returns
    -------
    subset_ids : list
        List of sample ids belonging to a given class.
    """
    return self.keys_with_value(self.classes, class_id)
"resource": ""
} |
def get_data_matrix_in_order(self, subset_ids):
    """
    Returns a numpy array of features, rows in the same order as subset_ids

    Parameters
    ----------
    subset_ids : list
        List od sample IDs to extracted from the dataset.

    Returns
    -------
    matrix : ndarray
        Matrix of features, for each id in subset_ids, in order.
    """
    if len(subset_ids) < 1:
        warnings.warn('subset must have atleast one ID - returning empty matrix!')
        return np.empty((0, 0))
    if isinstance(subset_ids, set):
        raise TypeError('Input set is not ordered, hence can not guarantee order! '
                        'Must provide a list or tuple.')
    if isinstance(subset_ids, str):
        subset_ids = [subset_ids, ]
    known = [sid for sid in subset_ids if sid in self.__data]
    if len(known) < len(subset_ids):
        raise ValueError('One or more IDs from subset do not exist in the dataset!')
    matrix = np.full((len(known), self.num_features), np.nan)
    for row, sid in enumerate(subset_ids):
        matrix[row, :] = self.__data[sid]
    return matrix
"resource": ""
} |
def label_set(self):
    """Set of labels in the dataset corresponding to class_set."""
    # one representative sample per class supplies the numeric label
    return [self.labels[self.sample_ids_in_class(cls)[0]]
            for cls in self.class_set]
"resource": ""
} |
def add_classes(self, classes):
    """
    Helper to rename the classes, given a dict keyed by the original sample IDs.

    Parameters
    ----------
    classes : dict
        Dict of class names keyed in by sample IDs.

    Raises
    ------
    TypeError
        If classes is not a dict.
    ValueError
        If all samples in dataset are not present in input dict,
        or one of the samples in input is not recognized.
    """
    if not isinstance(classes, dict):
        raise TypeError('Input classes is not a dict!')
    if len(classes) != self.num_samples:
        raise ValueError('Too few items - need {} keys'.format(self.num_samples))
    if any(key not in self.keys for key in classes):
        raise ValueError('One or more unrecognized keys!')
    self.__classes = classes
"resource": ""
} |
def __load(self, path):
    """Deserializes a previously pickled dataset from disk into this instance.

    Parameters
    ----------
    path : str
        Path to a file previously written by `save`.

    Raises
    ------
    IOError
        If the file can not be read.
    """
    try:
        path = os.path.abspath(path)
        # NOTE: pickle.load executes arbitrary code for untrusted files;
        # only load datasets from trusted sources.
        with open(path, 'rb') as df:
            self.__data, self.__classes, self.__labels, \
                self.__dtype, self.__description, \
                self.__num_features, self.__feature_names = pickle.load(df)
        # ensure the loaded dataset is valid
        self.__validate(self.__data, self.__classes, self.__labels)
    except IOError as ioe:
        # bug fix: the original passed `format(ioe)` as a *second argument*
        # to IOError instead of calling .format() on the message string
        raise IOError('Unable to read the dataset from file: {}'.format(ioe))
    except:
        raise
"resource": ""
} |
def __load_arff(self, arff_path, encode_nonnumeric=False):
    """Loads a given dataset saved in Weka's ARFF format.

    Parameters
    ----------
    arff_path : str
        Path to the .arff file on disk; its last column must be the class.
    encode_nonnumeric : bool
        Must be False; encoding non-numeric attributes is not implemented
        and setting this to True raises NotImplementedError.
    """
    try:
        from scipy.io.arff import loadarff
        arff_data, arff_meta = loadarff(arff_path)
    except:
        raise ValueError('Error loading the ARFF dataset!')
    attr_names = arff_meta.names()[:-1]  # last column is class
    attr_types = arff_meta.types()[:-1]
    if not encode_nonnumeric:
        # ensure all the attributes are numeric
        uniq_types = set(attr_types)
        if 'numeric' not in uniq_types:
            raise ValueError(
                'Currently only numeric attributes in ARFF are supported!')
        non_numeric = uniq_types.difference({'numeric'})
        if len(non_numeric) > 0:
            raise ValueError('Non-numeric features provided ({}), '
                             'without requesting encoding to numeric. '
                             'Try setting encode_nonnumeric=True '
                             'or encode features to numeric!'.format(non_numeric))
    else:
        raise NotImplementedError(
            'encoding non-numeric features to numeric is not implemented yet! '
            'Encode features beforing to ARFF.')
    self.__description = arff_meta.name  # to enable it as a label e.g. in neuropredict
    # initializing the key containers, before calling self.add_sample
    self.__data = OrderedDict()
    self.__labels = OrderedDict()
    self.__classes = OrderedDict()
    num_samples = len(arff_data)
    num_digits = len(str(num_samples))
    # synthesizes zero-padded row ids, since ARFF rows carry no identifier
    make_id = lambda index: 'row{index:0{nd}d}'.format(index=index, nd=num_digits)
    sample_classes = [cls.decode('utf-8') for cls in arff_data['class']]
    class_set = set(sample_classes)
    label_dict = dict()
    # encoding class names to labels 1 to n
    # NOTE(review): iteration over a set makes the class->label numbering
    # order unspecified across runs -- confirm this is acceptable
    for ix, cls in enumerate(class_set):
        label_dict[cls] = ix + 1
    for index in range(num_samples):
        sample = arff_data.take([index])[0].tolist()
        sample_attrs = sample[:-1]
        sample_class = sample[-1].decode('utf-8')
        self.add_sample(sample_id=make_id(index),  # ARFF rows do not have an ID
                        features=sample_attrs,
                        label=label_dict[sample_class],
                        class_id=sample_class)
        # not necessary to set feature_names=attr_names for each sample,
        # as we do it globally after loop
    self.__feature_names = attr_names
    return
"resource": ""
} |
def save(self, file_path):
    """
    Method to save the dataset to disk (as a pickled tuple of its internals).

    Parameters
    ----------
    file_path : str
        File path to save the current dataset to

    Raises
    ------
    IOError
        If saving to disk is not successful.
    """
    # TODO need a file format that is flexible and efficient to allow the following:
    # 1) being able to read just meta info without having to load the ENTIRE dataset
    #    i.e. use case: compatibility check with #subjects, ids and their classes
    # 2) random access layout: being able to read features for a single subject!
    try:
        file_path = os.path.abspath(file_path)
        with open(file_path, 'wb') as df:
            pickle.dump((self.__data, self.__classes, self.__labels,
                         self.__dtype, self.__description, self.__num_features,
                         self.__feature_names),
                        df)
        return
    except IOError as ioe:
        # bug fix: the original called IOError('...{}', format(ioe)) --
        # a two-argument exception whose message never received the details
        raise IOError('Unable to save the dataset to file: {}'.format(ioe))
    except:
        raise
"resource": ""
} |
def __validate(data, classes, labels):
    """Validator of inputs: checks types, matching lengths/keys and
    uniform dimensionality across samples. Returns True when valid.

    Raises TypeError for non-dict containers and ValueError for any
    mismatch between data, classes and labels.
    """
    if not isinstance(data, dict):
        raise TypeError(
            'data must be a dict! keys: sample ID or any unique identifier')
    if not isinstance(labels, dict):
        raise TypeError(
            'labels must be a dict! keys: sample ID or any unique identifier')
    if classes is not None:
        if not isinstance(classes, dict):
            # bug fix: the message previously said 'labels' (copy-paste error)
            raise TypeError(
                'classes must be a dict! keys: sample ID or any unique identifier')
    # NOTE(review): when classes is None, len(classes) below raises
    # TypeError -- confirm callers never pass classes=None
    if not len(data) == len(labels) == len(classes):
        raise ValueError('Lengths of data, labels and classes do not match!')
    if not set(list(data)) == set(list(labels)) == set(list(classes)):
        raise ValueError(
            'data, classes and labels dictionaries must have the same keys!')
    num_features_in_elements = np.unique([sample.size for sample in data.values()])
    if len(num_features_in_elements) > 1:
        raise ValueError(
            'different samples have different number of features - invalid!')
    return True
"resource": ""
} |
def get_meta(meta, name):
    """Retrieves the metadata variable 'name' from the 'meta' dict."""
    assert name in meta
    entry = meta[name]
    kind = entry['t']
    if kind in ['MetaString', 'MetaBool']:
        return entry['c']
    if kind == 'MetaInlines':
        # Handle bug in pandoc 2.2.3 and 2.2.3.1: Return boolean value rather
        # than strings, as appropriate.
        content = entry['c']
        if len(content) == 1 and content[0]['t'] == 'Str':
            word = content[0]['c']
            if word in ['true', 'True', 'TRUE']:
                return True
            if word in ['false', 'False', 'FALSE']:
                return False
        return stringify(content)
    if kind == 'MetaList':
        return [stringify(item['c']) for item in entry['c']]
    raise RuntimeError("Could not understand metadata variable '%s'." %
                       name)
"resource": ""
} |
def _getel(key, value):
    """Returns an element given a key and value."""
    zero_arg = ['HorizontalRule', 'Null']
    single_arg = ['Plain', 'Para', 'BlockQuote', 'BulletList',
                  'DefinitionList', 'HorizontalRule', 'Null']
    if key in zero_arg:
        return elt(key, 0)()
    if key in single_arg:
        return elt(key, 1)(value)
    return elt(key, len(value))(*value)
"resource": ""
} |
def quotify(x):
    """Replaces Quoted elements in element list 'x' with quoted strings.

    Pandoc uses the Quoted element in its json when --smart is enabled.
    Output to TeX/pdf automatically triggers --smart.
    stringify() ignores Quoted elements. Use quotify() first to replace
    Quoted elements in 'x' with quoted strings. 'x' should be a deep copy so
    that the underlying document is left untouched.

    Returns x."""
    def _quotify(key, value, fmt, meta):  # pylint: disable=unused-argument
        """Replaced Quoted elements with quoted strings."""
        if key != 'Quoted':
            return None
        quote = '"' if value[0]['t'] == 'DoubleQuote' else "'"
        inner = value[1]
        replacement = []
        # fold the opening quote into the first Str, or prepend one
        if inner[0]['t'] == 'Str':
            inner[0]['c'] = quote + inner[0]['c']
        else:
            replacement.append(Str(quote))
        # fold the closing quote into the last Str, or append one
        if inner[-1]['t'] == 'Str':
            inner[-1]['c'] = inner[-1]['c'] + quote
            replacement += inner
        else:
            replacement += inner + [Str(quote)]
        return replacement
    return walk(walk(x, _quotify, '', {}), join_strings, '', {})
"resource": ""
} |
def extract_attrs(x, n):
    """Extracts attributes from element list 'x' beginning at index 'n'.

    The elements encapsulating the attributes (typically a series of Str and
    Space elements) are removed from 'x'. Items before index 'n' are left
    unchanged.

    Returns the attributes in pandoc format. A ValueError is raised if
    attributes aren't found. An IndexError is raised if the index 'n' is out
    of range."""

    # Check for the start of the attributes string
    if not (x[n]['t'] == 'Str' and x[n]['c'].startswith('{')):
        raise ValueError('Attributes not found.')

    # It starts with {, so this *may* be an attributes list. Search for where
    # the attributes end. Do not consider } in quoted elements.

    seq = []          # A sequence of saved values
    quotechar = None  # Used to keep track of quotes in strings
    flag = False      # Flags that an attributes list was found
    i = 0             # Initialization

    for i, v in enumerate(x[n:]):  # Scan through the list
        if v and v['t'] == 'Str':
            # Scan for } outside of a quote
            for j, c in enumerate(v['c']):
                if c == quotechar:  # This is an end quote
                    quotechar = None
                elif c in ['"', "'"]:  # This is an open quote
                    quotechar = c
                elif c == '}' and quotechar is None:  # The attributes end here
                    # Split the string at the } and save the pieces
                    head, tail = v['c'][:j+1], v['c'][j+1:]
                    # Deep-copy before mutating so 'seq' keeps the head piece
                    # while 'x' keeps only the tail piece.
                    x[n+i] = copy.deepcopy(v)
                    x[n+i]['c'] = tail
                    v['c'] = head
                    flag = True
                    break
        seq.append(v)
        if flag:
            break

    if flag:  # Attributes string was found, so process it

        # Delete empty and extracted elements
        if x[n+i]['t'] == 'Str' and not x[n+i]['c']:
            del x[n+i]
        del x[n:n+i]

        # Process the attrs
        attrstr = stringify(dollarfy(quotify(seq))).strip()
        attrs = PandocAttributes(attrstr, 'markdown').to_pandoc()

        # Remove extranneous quotes from kvs
        for i, (k, v) in enumerate(attrs[2]):  # pylint: disable=unused-variable
            if v[0] == v[-1] == '"' or v[0] == "'" == v[-1] == "'":
                attrs[2][i][1] = attrs[2][i][1][1:-1]

        # We're done
        return attrs

    # Attributes not found
    raise ValueError('Attributes not found.')
def _join_strings(x):
    """Joins the first pair of adjacent Str elements in element list 'x'.

    Returns None after a merge (signalling the caller to re-scan from the
    start) and True once no adjacent Str pair remains.
    """
    i = 0
    while i < len(x) - 1:
        first, second = x[i], x[i + 1]
        if first['t'] == 'Str' == second['t']:
            first['c'] += second['c']
            del x[i + 1]  # In-place deletion of the absorbed element
            return None   # Forces processing to repeat
        i += 1
    return True
def join_strings(key, value, fmt, meta):  # pylint: disable=unused-argument
    """Joins adjacent Str elements in the 'value' list."""
    # Index of the inline sub-list for each element type; None means the
    # value itself is the inline list. The -2/-5 offsets pick out the
    # caption inlines of Image/Table elements in pandoc's AST.
    targets = {'Para': None, 'Plain': None, 'Image': -2, 'Table': -5}
    if key in targets:
        index = targets[key]
        _join_strings(value if index is None else value[index])
def _is_broken_ref(key1, value1, key2, value2):
    """True if this is a broken reference; False otherwise."""
    # A link followed by a string may represent a broken reference
    if key1 != 'Link' or key2 != 'Str':
        return False
    # Assemble the parts; the Link payload layout changed in pandoc 1.16
    n = 0 if _PANDOCVERSION < '1.16' else 1
    if isinstance(value1[n][0]['c'], list):
        # Occurs when there is quoted text in an actual link. This is not
        # a broken link. See Issue #1.
        return False
    s = value1[n][0]['c'] + value2
    # True if this matches the reference regex (bool() instead of the
    # redundant 'True if m else False' conditional)
    return bool(_REF.match(s))
def _repair_refs(x):
    """Performs the repair on the element list 'x'.

    Returns None after the first repair (the caller must re-invoke until no
    broken reference remains) and True when 'x' is fully repaired.
    """

    if _PANDOCVERSION is None:
        raise RuntimeError('Module uninitialized. Please call init().')

    # Scan the element list x
    for i in range(len(x)-1):

        # Check for broken references
        if _is_broken_ref(x[i]['t'], x[i]['c'] if 'c' in x[i] else [],
                          x[i+1]['t'], x[i+1]['c'] if 'c' in x[i+1] else []):

            # Get the reference string (Link payload layout changed in 1.16)
            n = 0 if _PANDOCVERSION < '1.16' else 1
            s = x[i]['c'][n][0]['c'] + x[i+1]['c']

            # Chop it into pieces. Note that the prefix and suffix may be
            # parts of other broken references.
            prefix, label, suffix = _REF.match(s).groups()

            # Insert the suffix, label and prefix back into x. Do it in this
            # order so that the indexing works.
            if suffix:
                x.insert(i+2, Str(suffix))
            x[i+1] = Cite(
                [{"citationId":label,
                  "citationPrefix":[],
                  "citationSuffix":[],
                  "citationNoteNum":0,
                  "citationMode":{"t":"AuthorInText", "c":[]},
                  "citationHash":0}],
                [Str('@' + label)])
            if prefix:
                # Merge the prefix into a preceding Str when possible,
                # otherwise overwrite the consumed Link element with it.
                if i > 0 and x[i-1]['t'] == 'Str':
                    x[i-1]['c'] = x[i-1]['c'] + prefix
                    del x[i]
                else:
                    x[i] = Str(prefix)
            else:
                del x[i]

            return None  # Forces processing to repeat

    return True
def _remove_brackets(x, i):
    """Removes curly brackets surrounding the Cite element at index 'i' in
    the element list 'x'. The modifier is assumed to have been extracted
    already; elements emptied by the trim are deleted from 'x'."""
    assert x[i]['t'] == 'Cite'
    assert 0 < i < len(x) - 1

    before, after = x[i-1], x[i+1]

    # Both neighbours must be strings carrying the bracket pair
    if before['t'] != 'Str' or after['t'] != 'Str':
        return
    if not (before['c'].endswith('{') and after['c'].startswith('}')):
        return

    # Trim the closing bracket first so index i-1 stays valid below
    if len(after['c']) > 1:
        after['c'] = after['c'][1:]
    else:
        del x[i+1]

    # Trim the opening bracket
    if len(before['c']) > 1:
        before['c'] = before['c'][:-1]
    else:
        del x[i-1]
def search(self, **kwargs):
    """Get movies that match the search query string from the API.

    Args:
        q (optional): plain text search query; remember to URI encode
        page_limit (optional): number of search results to show per page,
                               default=30
        page (optional): results page number, default=1

    Returns:
        A dict representation of the JSON returned from the API.
    """
    response = self._GET(self._get_path('search'), kwargs)
    self._set_attrs_to_values(response)
    return response
def cast(self, **kwargs):
    """Get the cast for a movie specified by id from the API.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    response = self._GET(self._get_id_path('cast'), kwargs)
    self._set_attrs_to_values(response)
    return response
def clips(self, **kwargs):
    """Get related clips and trailers for a movie specified by id
    from the API.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    response = self._GET(self._get_id_path('clips'), kwargs)
    self._set_attrs_to_values(response)
    return response
def debug(value):
    """
    Simple tag to debug output a variable;

    Usage:

    {% debug request %}
    """
    # Emit type+value, the attribute list, then a blank separator.
    for chunk in ("%s %s: " % (type(value), value), dir(value), '\n\n'):
        print(chunk)
    return ''
def get_sample_data(sample_file):
    """Read and returns sample data to fill form with default sample sequence. """
    with open(sample_file) as handle:
        return handle.read()
def blast_records_to_object(blast_records):
    """Transforms biopython's blast record into blast object defined in django-blastplus app.

    :param blast_records: iterable of Biopython BLAST record objects
    :return: list of BlastRecord app objects mirroring the record/
             alignment/hsp hierarchy
    """

    # container for transformed objects
    blast_objects_list = []

    for blast_record in blast_records:

        # Top-level record: copy over the query/run metadata.
        br = BlastRecord(**{'query': blast_record.query,
                            'version': blast_record.version,
                            'expect': blast_record.expect,
                            'application': blast_record.application,
                            'reference': blast_record.reference})

        for alignment in blast_record.alignments:

            al = Alignment(**{
                'hit_def': alignment.hit_def,
                'title': alignment.title,
                'length': alignment.length,
            })

            # Each HSP is copied field-by-field; 'str' additionally keeps
            # Biopython's textual rendering of the HSP.
            for hsp in alignment.hsps:
                h = Hsp(**{
                    'align_length': hsp.align_length,
                    'bits': hsp.bits,
                    'expect': hsp.expect,
                    'frame': hsp.frame,
                    'gaps': hsp.gaps,
                    'identities': hsp.identities,
                    'match': hsp.match,
                    'num_alignments': hsp.num_alignments,
                    'positives': hsp.positives,
                    'query': hsp.query,
                    'query_end': hsp.query_end,
                    'query_start': hsp.query_start,
                    'sbjct': hsp.sbjct,
                    'sbjct_end': hsp.sbjct_end,
                    'sbjct_start': hsp.sbjct_start,
                    'score': hsp.score,
                    'strand': hsp.strand,
                    'str': str(hsp),
                })

                al.hsp_list.append(h)

            br.alignments.append(al)

        blast_objects_list.append(br)

    return blast_objects_list
def get_annotation(db_path, db_list):
    """ Checks if database is set as annotated.

    Returns the 'annotated' flag of the first entry in 'db_list' whose
    'path' equals 'db_path', or False when no entry matches.
    """
    return next((db["annotated"] for db in db_list if db["path"] == db_path),
                False)
def find_usbserial(vendor, product):
    """Find the tty device for a given usbserial devices identifiers.

    Args:
      vendor: (int) something like 0x0000
      product: (int) something like 0x0000

    Returns:
      String, like /dev/ttyACM0 or /dev/tty.usb...
    """
    system = platform.system()
    if system == 'Linux':
        # Linux lookup wants the ids as zero-padded hex strings.
        hex_ids = [('%04x' % (x)).strip() for x in (vendor, product)]
        return linux_find_usbserial(*hex_ids)
    if system == 'Darwin':
        return osx_find_usbserial(vendor, product)
    raise NotImplementedError('Cannot find serial ports on %s' % system)
def create_values(self, base_model=models.Model, base_manager=models.Manager):
    """
    This method will create a model which will hold field values for
    field types of custom_field_model.

    :param base_model: abstract base class for the generated value model
    :param base_manager: base class for the generated model's manager
    :return: the (abstract) CustomContentTypeFieldValue model class
    """
    # Captured so the nested classes can reach the builder's configuration.
    _builder = self

    class CustomContentTypeFieldValueManager(base_manager):

        def create(self, **kwargs):
            """
            Subclass create in order to be able to use "value" in kwargs
            instead of using "value_%s" passing also type directly
            """
            if 'value' in kwargs:
                # Pop 'value' first: the concrete column is only known via
                # the custom_field's data_type, resolved by the property.
                value = kwargs.pop('value')
                created_object = super(CustomContentTypeFieldValueManager, self).create(**kwargs)
                created_object.value = value
                return created_object
            else:
                return super(CustomContentTypeFieldValueManager, self).create(**kwargs)

    @python_2_unicode_compatible
    class CustomContentTypeFieldValue(base_model):
        custom_field = models.ForeignKey('.'.join(_builder.fields_model),
                                         verbose_name=_('custom field'),
                                         related_name='+')
        content_type = models.ForeignKey(ContentType, editable=False,
                                         verbose_name=_('content type'),
                                         limit_choices_to=_builder.content_types_query)
        object_id = models.PositiveIntegerField(_('object id'), db_index=True)
        content_object = GenericForeignKey('content_type', 'object_id')

        # One nullable column per supported data type; only the column
        # matching custom_field.data_type is used for a given row.
        value_text = models.TextField(blank=True, null=True)
        value_integer = models.IntegerField(blank=True, null=True)
        value_float = models.FloatField(blank=True, null=True)
        value_time = models.TimeField(blank=True, null=True)
        value_date = models.DateField(blank=True, null=True)
        value_datetime = models.DateTimeField(blank=True, null=True)
        value_boolean = models.NullBooleanField(blank=True)

        objects = CustomContentTypeFieldValueManager()

        def _get_value(self):
            return getattr(self, 'value_%s' % self.custom_field.data_type)

        def _set_value(self, new_value):
            setattr(self, 'value_%s' % self.custom_field.data_type, new_value)

        # Typed accessor dispatching on the field's declared data type.
        value = property(_get_value, _set_value)

        class Meta:
            unique_together = ('custom_field', 'content_type', 'object_id')
            verbose_name = _('custom field value')
            verbose_name_plural = _('custom field values')
            abstract = True

        def save(self, *args, **kwargs):
            # save content type as user shouldn't be able to change it
            self.content_type = self.custom_field.content_type
            super(CustomContentTypeFieldValue, self).save(*args, **kwargs)

        def validate_unique(self, exclude=None):
            # Enforce one value per (field, content type, object) even on
            # forms that exclude some of those fields.
            qs = self.__class__._default_manager.filter(
                custom_field=self.custom_field,
                content_type=self.custom_field.content_type,
                object_id=self.object_id,
            )
            if not self._state.adding and self.pk is not None:
                qs = qs.exclude(pk=self.pk)
            if qs.exists():
                raise ValidationError({ NON_FIELD_ERRORS: (_('A value for this custom field already exists'),) })

        def __str__(self):
            return "%s: %s" % (self.custom_field.name, self.value)

    return CustomContentTypeFieldValue
def create_manager(self, base_manager=models.Manager):
    """
    This will create the custom Manager that will use the fields_model and values_model
    respectively.

    :param base_manager: the base manager class to inherit from
    :return: the generated CustomManager class
    """
    _builder = self

    class CustomManager(base_manager):

        # NOTE(review): custom_args={} is a mutable default argument; it is
        # only read (merged via dict(..., **custom_args)), so it is not
        # mutated here, but consider custom_args=None for safety.
        def search(self, search_data, custom_args={}):
            """
            Search inside the custom fields for this model for any match
            of search_data and returns existing model instances

            :param search_data: text to look for in searchable custom fields
            :param custom_args: extra filters applied to the fields queryset
            :return: queryset of model instances with a matching value
            """
            query = None
            # Currently only text fields are searched, case-insensitively.
            lookups = (
                '%s__%s' % ('value_text', 'icontains'),
            )
            content_type = ContentType.objects.get_for_model(self.model)
            custom_args = dict({ 'content_type': content_type, 'searchable': True }, **custom_args)
            custom_fields = dict((f.name, f) for f in _builder.fields_model_class.objects.filter(**custom_args))
            for value_lookup in lookups:
                for key, f in custom_fields.items():
                    found = _builder.values_model_class.objects.filter(**{ 'custom_field': f,
                                                                          'content_type': content_type,
                                                                          value_lookup: search_data })
                    if found.count() > 0:
                        if query is None:
                            query = Q()
                        # AND together one pk__in clause per matching field.
                        query = query & Q(**{ str('%s__in' % self.model._meta.pk.name):
                                              [obj.object_id for obj in found] })
            if query is None:
                return self.get_queryset().none()
            return self.get_queryset().filter(query)

    return CustomManager
def create_mixin(self):
    """
    This will create the custom Model Mixin to attach to your custom field
    enabled model.

    :return: the generated CustomModelMixin class
    """
    _builder = self

    class CustomModelMixin(object):
        @cached_property
        def _content_type(self):
            # Cached per instance: the content type never changes.
            return ContentType.objects.get_for_model(self)

        @classmethod
        def get_model_custom_fields(cls):
            """ Return a list of custom fields for this model, callable at model level """
            return _builder.fields_model_class.objects.filter(content_type=ContentType.objects.get_for_model(cls))

        def get_custom_fields(self):
            """ Return a list of custom fields for this model """
            return _builder.fields_model_class.objects.filter(content_type=self._content_type)

        def get_custom_value(self, field):
            """ Get a value for a specified custom field """
            return _builder.values_model_class.objects.get(custom_field=field,
                                                           content_type=self._content_type,
                                                           object_id=self.pk)

        def set_custom_value(self, field, value):
            """ Set a value for a specified custom field """
            custom_value, created = \
                _builder.values_model_class.objects.get_or_create(custom_field=field,
                                                                  content_type=self._content_type,
                                                                  object_id=self.pk)
            custom_value.value = value
            custom_value.full_clean()
            custom_value.save()
            return custom_value

        #def __getattr__(self, name):
        #    """ Get a value for a specified custom field """
        #    try:
        #        obj = _builder.values_model_class.objects.get(custom_field__name=name,
        #                                                      content_type=self._content_type,
        #                                                      object_id=self.pk)
        #        return obj.value
        #    except ObjectDoesNotExist:
        #        pass
        #    return super(CustomModelMixin, self).__getattr__(name)

    return CustomModelMixin
def bytes_iter(obj):
    """Turn a complex object into an iterator of byte strings.

    The resulting iterator can be used for caching.

    Dicts are walked in sorted key order and sequences are sorted when
    possible, so logically-equal objects yield the same stream.
    """
    if obj is None:
        return
    elif isinstance(obj, six.binary_type):
        yield obj
    elif isinstance(obj, six.string_types):
        yield obj
    elif isinstance(obj, (date, datetime)):
        yield obj.isoformat()
    elif is_mapping(obj):
        for key in sorted(obj.keys()):
            for out in chain(bytes_iter(key), bytes_iter(obj[key])):
                yield out
    elif is_sequence(obj):
        if isinstance(obj, (list, set)):
            try:
                obj = sorted(obj)
            except Exception:
                # Deliberate best-effort: unorderable elements are hashed
                # in their original order.
                pass
        for item in obj:
            for out in bytes_iter(item):
                yield out
    elif isinstance(obj, (types.FunctionType, types.BuiltinFunctionType,
                          types.MethodType, types.BuiltinMethodType)):
        # Callables are represented by their name only (Py2 attribute).
        yield getattr(obj, 'func_name', '')
    else:
        yield six.text_type(obj)
def hash_data(obj):
    """Generate a SHA1 from a complex object."""
    digest = sha1()
    for chunk in bytes_iter(obj):
        # bytes_iter may yield text; normalise to UTF-8 bytes for hashing.
        if isinstance(chunk, six.text_type):
            chunk = chunk.encode('utf-8')
        digest.update(chunk)
    return digest.hexdigest()
def visit_Import(self, node):
    """callback for 'import' statement"""
    # Each plain import is recorded as (module=None, name, alias, level=None).
    for alias in node.names:
        self.imports.append((None, alias.name, alias.asname, None))
    ast.NodeVisitor.generic_visit(self, node)
def visit_ImportFrom(self, node):
    """callback for 'import from' statement"""
    # 'from X import a as b' records (X, a, b, relative-import level).
    for alias in node.names:
        self.imports.append((node.module, alias.name, alias.asname, node.level))
    ast.NodeVisitor.generic_visit(self, node)
def _get_imported_module(self, module_name):
    """try to get imported module reference by its name"""
    # Exact name first: the import might be a module on this module set.
    direct = self.by_name.get(module_name)
    if direct:
        return direct

    # The last dotted part might be an object, not a module: strip it.
    parent_name = module_name.rsplit('.', 1)[0]
    parent = self.by_name.get(parent_name)
    if parent:
        return parent

    # Special case: packages are tracked by their __init__ module.
    for candidate in (module_name, parent_name):
        if candidate in self.pkgs:
            return self.by_name[candidate + ".__init__"]
def run_airbnb_demo(data_dir):
    """HyperTransformer will transform the airbnb data back and forth."""
    # Setup
    meta_file = os.path.join(data_dir, 'Airbnb_demo_meta.json')
    transformers = ['NumberTransformer', 'DTTransformer', 'CatTransformer']
    ht = HyperTransformer(meta_file)

    # Run a full round trip
    transformed = ht.fit_transform(transformer_list=transformers)
    result = ht.reverse_transform(tables=transformed)

    # Check: every table survives the round trip and none is all-null
    assert result.keys() == ht.table_dict.keys()
    for table in result.values():
        assert not table.isnull().all().all()
def eventlog(self, path):
    """Iterates over the Events contained within the log at the given path.

    For each Event, yields a XML string.
    """
    self.logger.debug("Parsing Event log file %s.", path)
    # Download the raw EVTX file into a throwaway local copy (the local
    # name no longer shadows the stdlib 'tempfile' module).
    with NamedTemporaryFile(buffering=0) as local_copy:
        self._filesystem.download(path, local_copy.name)
        header = FileHeader(local_copy.read(), 0)
        for xml_string, _record in evtx_file_xml_view(header):
            yield xml_string
def publish_message_to_centrifugo(sender, instance, created, **kwargs):
    """ Publishes each saved message to Centrifugo. """
    if created is not True:
        return

    client = Client("{0}api/".format(getattr(settings, "CENTRIFUGE_ADDRESS")),
                    getattr(settings, "CENTRIFUGE_SECRET"))

    # we ensure the client is still in the thread (he may have left or have been removed)
    participations = Participation.objects.filter(
        thread=instance.thread, date_left__isnull=True).select_related('participant')
    active_participants = [p.participant.id for p in participations]

    payload = {
        "id": instance.id,
        "body": instance.body,
        "sender": instance.sender.id,
        "thread": instance.thread.id,
        "sent_at": str(instance.sent_at),
        "is_notification": True,  # ATTENTION: check against sender too to be sure to not notify him his message
    }
    client.publish(
        build_channel(settings.CENTRIFUGO_MESSAGE_NAMESPACE, instance.thread.id, active_participants),
        payload
    )
def publish_participation_to_thread(sender, instance, created, **kwargs):
    """ Warns users everytime a thread including them is published. This is done via channel subscription. """
    # Only fires for the custom 'created_and_add_participants' signal kwarg,
    # i.e. once the thread exists AND its participants have been attached.
    if kwargs.get('created_and_add_participants') is True:
        request_participant_id = kwargs.get('request_participant_id')
        # NOTE(review): the id is only used as a guard here, never read —
        # presumably it marks "published by an actual user"; confirm intent.
        if request_participant_id is not None:
            client = Client("{0}api/".format(getattr(settings, "CENTRIFUGE_ADDRESS")), getattr(settings, "CENTRIFUGE_SECRET"))
            active_participants = [participation.participant for participation in Participation.objects.filter(thread=instance, date_left__isnull=True).select_related('participant')]
            # Tell each participant, on his personal thread channel, which
            # message channel to connect to for this new thread.
            for participant in active_participants:
                client.publish(
                    build_channel(settings.CENTRIFUGO_THREAD_NAMESPACE, participant.id, [participant.id]),
                    {
                        "message_channel_to_connect_to": build_channel(settings.CENTRIFUGO_MESSAGE_NAMESPACE, instance.id, [p.id for p in active_participants])
                    }
                )
def __pre_check(self, requestedUrl):
    '''
    Allow the pre-emptive fetching of sites with a full browser if they're known
    to be dick hosters.

    Dispatches to the Sucuri or Cloudflare cookie pre-fetch depending on the
    URL's host (known-bad netloc lists) or path (test-hook style paths).
    '''
    components = urllib.parse.urlsplit(requestedUrl)
    netloc_l = components.netloc.lower()
    if netloc_l in Domain_Constants.SUCURI_GARBAGE_SITE_NETLOCS:
        self.__check_suc_cookie(components)
    elif netloc_l in Domain_Constants.CF_GARBAGE_SITE_NETLOCS:
        self.__check_cf_cookie(components)
    # The path checks below look like fixed endpoints used in testing —
    # presumably matching a local test server; confirm before removing.
    elif components.path == '/sucuri_shit_2':
        self.__check_suc_cookie(components)
    elif components.path == '/sucuri_shit_3':
        self.__check_suc_cookie(components)
    elif components.path == '/cloudflare_under_attack_shit_2':
        self.__check_cf_cookie(components)
    elif components.path == '/cloudflare_under_attack_shit_3':
        self.__check_cf_cookie(components)
def __decompressContent(self, coding, pgctnt):
    """
    Decompress a response body according to its Content-Encoding.

    Returns a (compression-type, decompressed-bytes) tuple. 'deflate'
    bodies are brute-forced across every plausible zlib window setting,
    because servers disagree about what "deflate" actually means.
    """
    if coding == 'deflate':
        compType = "deflate"

        bits_opts = [
            -zlib.MAX_WBITS,      # deflate
            zlib.MAX_WBITS,       # zlib
            zlib.MAX_WBITS | 16,  # gzip
            zlib.MAX_WBITS | 32,  # "automatic header detection"
            0,                    # Try to guess from header

            # Try all the raw window options.
            -8, -9, -10, -11, -12, -13, -14, -15,

            # Stream with zlib headers
            8, 9, 10, 11, 12, 13, 14, 15,

            # With gzip header+trailer
            8+16, 9+16, 10+16, 11+16, 12+16, 13+16, 14+16, 15+16,

            # Automatic detection
            8+32, 9+32, 10+32, 11+32, 12+32, 13+32, 14+32, 15+32,
        ]
        err = None
        for wbits_val in bits_opts:
            try:
                pgctnt = zlib.decompress(pgctnt, wbits_val)
                return compType, pgctnt
            except zlib.error as e:
                err = e
        # We can't get here without err having thrown.
        raise err

    elif coding == 'gzip':
        compType = "gzip"

        buf = io.BytesIO(pgctnt)
        f = gzip.GzipFile(fileobj=buf)
        pgctnt = f.read()

    elif coding == "sdch":
        # BUG FIX: this message interpolated the undefined name 'pgreq',
        # turning the intended error into a NameError. Use 'coding'.
        raise Exceptions.ContentTypeError("Wait, someone other then google actually supports SDCH compression (%s)?" % coding)

    else:
        compType = "none"

    return compType, pgctnt
def addSeleniumCookie(self, cookieDict):
    '''
    Install a cookie exported from a selenium webdriver into
    the active opener
    '''
    # 'httponly' goes into the RFC-2965 "rest" dict: stringified when
    # present, a literal False when absent (matching the old behaviour).
    if 'httponly' in cookieDict:
        httponly = "%s" % cookieDict['httponly']
    else:
        httponly = False

    cookie = http.cookiejar.Cookie(
            version            = 0,
            name               = cookieDict['name'],
            value              = cookieDict['value'],
            port               = None,
            port_specified     = False,
            domain             = cookieDict['domain'],
            domain_specified   = True,
            domain_initial_dot = False,
            path               = cookieDict['path'],
            path_specified     = False,
            secure             = cookieDict['secure'],
            expires            = cookieDict.get('expiry'),
            discard            = False,
            comment            = None,
            comment_url        = None,
            rest               = {"httponly": httponly},
            rfc2109            = False
        )

    self.addCookie(cookie)
def mail_on_500(app, recipients, sender='noreply@localhost'):
    '''Main function for setting up Flask-ErrorMail to send e-mails when 500
    errors occur.

    :param app: Flask Application Object
    :type app: flask.Flask

    :param recipients: List of recipient email addresses.
    :type recipients: list or tuple

    :param sender: Email address that should be listed as the sender. Defaults
        to 'noreply@localhost'
    :type sender: string
    '''
    #importing locally, so that the dependencies are only required if
    # mail_on_500 is used.
    from flask import request as __request
    from flask_mail import Mail as __Mail
    from flask_mail import Message as __Message

    mail = __Mail(app)

    # create a closure to track the sender and recipients
    def email_exception(exception):
        '''Handles the exception message from Flask by sending an email to the
        recipients defined in the call to mail_on_500.
        '''
        msg = __Message("[Flask|ErrorMail] Exception Detected",
                        sender=sender,
                        recipients=recipients)
        # Body: traceback section followed by a dump of the WSGI environ.
        msg_contents = [
            'Traceback:',
            '='*80,
            traceback.format_exc(),
        ]
        msg_contents.append('\n')
        msg_contents.append('Request Information:')
        msg_contents.append('='*80)
        environ = __request.environ
        environkeys = sorted(environ.keys())
        for key in environkeys:
            msg_contents.append('%s: %s' % (key, environ.get(key)))

        msg.body = '\n'.join(msg_contents) + '\n'
        mail.send(msg)

    # Fire the mail for every HTTP 500 the app raises.
    app.register_error_handler(500, email_exception)
def dependencies(dist, recursive=False, info=False):
    """Yield distribution's dependencies.

    With recursive=True the whole transitive requirement set is resolved
    via pkg_resources; info=True renders full requirement strings instead
    of bare project names. The result is unique and case-insensitively
    sorted.
    """
    if recursive:
        required = set(pkg_resources.require(dist.project_name))
        required.remove(dist)  # the distribution itself is not a dependency
        requirements = {r.as_requirement() for r in required}
    else:
        requirements = dist.requires()

    render = str if info else (lambda r: r.project_name)
    return sorted({render(r) for r in requirements},
                  key=lambda name: name.lower())
def user_group_perms_processor(request):
    """
    return context variables with org permissions to the user.
    """
    org = None
    group = None

    # Anonymous (or missing) users get no group and no org.
    if hasattr(request, "user") and not request.user.is_anonymous:
        group = request.user.get_org_group()
        org = request.user.get_org()

    context = dict(org_perms=GroupPermWrapper(group)) if group else dict()

    # make sure user_org is set on our request based on their session
    context["user_org"] = org
    return context
def set_org_processor(request):
    """
    Simple context processor that automatically sets 'org' on the context if it
    is present in the request.
    """
    org = getattr(request, "org", None)
    if not org:
        return dict()

    def latest(bg_type):
        # Newest active background of the given type, or None.
        qs = org.backgrounds.filter(is_active=True, background_type=bg_type)
        return qs.order_by("-pk").first()

    return dict(org=org, pattern_bg=latest("P"), banner_bg=latest("B"))
def _submit(self, pathfile, filedata, filename):
    '''
    Submit either a file from disk, or a in-memory file to the solver service, and
    return the request ID associated with the new captcha task.

    Raises ValueError when neither a readable file path nor raw image bytes
    are supplied, and exc.CaptchaSolverFailure when the upload is rejected.
    '''
    opened_file = None
    if pathfile and os.path.exists(pathfile):
        # BUG FIX: this handle used to leak; it is now closed in the
        # try/finally below once the upload has finished.
        opened_file = open(pathfile, 'rb')
        files = {'file': opened_file}
    elif filedata:
        assert filename
        files = {'file' : (filename, io.BytesIO(filedata))}
    else:
        raise ValueError("You must pass either a valid file path, or a bytes array containing the captcha image!")

    payload = {
        'key'    : self.api_key,
        'method' : 'post',
        'json'   : True,
    }

    self.log.info("Uploading to 2Captcha.com.")

    url = self.getUrlFor('input', {})
    try:
        request = requests.post(url, files=files, data=payload)
    finally:
        if opened_file is not None:
            opened_file.close()

    if not request.ok:
        raise exc.CaptchaSolverFailure("Posting captcha to solve failed!")

    resp_json = json.loads(request.text)
    return self._process_response(resp_json)
def mine_urls(urls, params=None, callback=None, **kwargs):
    """Concurrently retrieve URLs.

    :param urls: A set of URLs to concurrently retrieve.
    :type urls: iterable

    :param params: (optional) The URL parameters to send with each
                   request.
    :type params: dict

    :param callback: (optional) A callback function to be called on each
                     :py:class:`aiohttp.client.ClientResponse`.

    :param \*\*kwargs: (optional) Arguments that ``get_miner`` takes.
    """
    miner = Miner(**kwargs)
    try:
        # Let Ctrl-C shut the miner down cleanly instead of killing the loop.
        miner.loop.add_signal_handler(signal.SIGINT, miner.close)
        miner.loop.run_until_complete(miner.mine_urls(urls, params, callback))
    except RuntimeError:
        # Raised when the loop is stopped/closed mid-run (e.g. via SIGINT);
        # deliberately swallowed as a normal shutdown path.
        pass
def mine_items(identifiers, params=None, callback=None, **kwargs):
    """Concurrently retrieve metadata from Archive.org items.

    :param identifiers: A set of Archive.org item identifiers to mine.
    :type identifiers: iterable

    :param params: (optional) The URL parameters to send with each
                   request.
    :type params: dict

    :param callback: (optional) A callback function to be called on each
                     :py:class:`aiohttp.client.ClientResponse`.

    :param \*\*kwargs: (optional) Arguments that ``get_miner`` takes.
    """
    miner = ItemMiner(**kwargs)
    try:
        miner.loop.run_until_complete(miner.mine_items(identifiers, params, callback))
    except RuntimeError:
        # NOTE(review): unlike mine_urls(), this closes the loop instead of
        # passing, and installs no SIGINT handler — confirm the asymmetry
        # is intentional.
        miner.loop.close()
def configure(username=None, password=None, overwrite=None, config_file=None):
    """Configure IA Mine with your Archive.org credentials."""
    # Prompt interactively for anything not supplied by the caller.
    if not username:
        username = input('Email address: ')
    if not password:
        password = getpass('Password: ')
    saved_to = write_config_file(username, password, overwrite, config_file)
    print('\nConfig saved to: {}'.format(saved_to))
def __get_time_range(self, startDate, endDate):
    """Return time range

    Defaults to last week's Monday-through-Friday span when either bound
    is missing; explicit bounds are passed through unchanged.
    """
    today = date.today()
    default_start = today - timedelta(days=today.weekday(), weeks=1)
    default_end = default_start + timedelta(days=4)
    return (startDate or str(default_start),
            endDate or str(default_end))
def get_industry_index(self, index_id, items=None):
    """retrieves all symbols that belong to an industry.
    """
    query = self.select('yahoo.finance.industry', items)
    return query.where(['id', '=', index_id])
def get_dividendhistory(self, symbol, startDate, endDate, items=None):
    """Retrieves dividend history for 'symbol' over the given date range.
    """
    # Missing bounds are filled in with the default (last-week) range.
    startDate, endDate = self.__get_time_range(startDate, endDate)
    query = self.select('yahoo.finance.dividendhistory', items)
    return query.where(['symbol', '=', symbol],
                       ['startDate', '=', startDate],
                       ['endDate', '=', endDate])
def get_symbols(self, name):
    """Retrieves all symbols belonging to a company

    Queries Yahoo's symbol-suggest JSONP endpoint and unwraps the JSONP
    callback, returning a requests.Response-like *class* whose attributes
    carry the unwrapped JSON payload.
    """
    url = "http://autoc.finance.yahoo.com/autoc?query={0}&callback=YAHOO.Finance.SymbolSuggest.ssCallback".format(name)
    response = requests.get(url)
    # Strip the JSONP wrapper to get at the raw JSON body.
    json_data = re.match("YAHOO\.Finance\.SymbolSuggest.ssCallback\((.*)\)", response.text)
    try:
        json_data = json_data.groups()[0]
    except (Exception,) as e:
        # Match failure (json_data is None) — treat the service as down.
        print(e)
        json_data = '{"results": "Webservice seems to be down"}'
    # NOTE(review): this returns a dynamically built *type*, not an
    # instance; 'json' is a zero-arg lambda rather than a bound method —
    # presumably callers only read the attributes. Confirm before changing.
    return type('response', (requests.Response,),{
        'text' : json_data,
        'content': json_data.encode(),
        'status_code': response.status_code,
        'reason': response.reason,
        'encoding': response.encoding,
        'apparent_encoding': response.apparent_encoding,
        'cookies': response.cookies,
        'headers': response.headers,
        'json': lambda : json.loads(json_data),
        'url': response.url
    })
def fromJson(struct, attributes=None):
    "Convert a JSON struct to a Geometry based on its structure"
    # Python 2 code: uses basestring and dict.iteritems throughout.
    if isinstance(struct, basestring):
        struct = json.loads(struct)
    # Presence of any of these keys identifies the geometry type.
    indicative_attributes = {
        'x': Point,
        'wkid': SpatialReference,
        'paths': Polyline,
        'rings': Polygon,
        'points': Multipoint,
        'xmin': Envelope
    }
    # bbox string
    # NOTE(review): this branch looks unreachable for a raw "x1,y1,x2,y2"
    # string — json.loads above would already have raised on it. Confirm.
    if isinstance(struct, basestring) and len(struct.split(',')) == 4:
        return Envelope(*map(float, struct.split(',')))
    # Look for telltale attributes in the dict
    if isinstance(struct, dict):
        for key, cls in indicative_attributes.iteritems():
            if key in struct:
                ret = cls.fromJson(dict((str(key), value)
                                        for (key, value) in struct.iteritems()))
                if attributes:
                    # Attribute keys are lower-cased for case-insensitive access.
                    ret.attributes = dict((str(key.lower()), val)
                                          for (key, val)
                                          in attributes.iteritems())
                return ret
    raise ValueError("Unconvertible to geometry")
def fromGeoJson(struct, attributes=None):
    """Convert a GeoJSON-like struct to a Geometry based on its structure.

    Accepts a JSON string or an already-parsed structure. Feature and
    FeatureCollection wrappers are unwrapped, and feature properties are
    attached to the resulting geometry instance(s) as ``attributes``.
    """
    if isinstance(struct, basestring):
        struct = json.loads(struct)
    # GeoJSON geometry type -> local geometry class.
    type_map = {
        'Point': Point,
        'MultiLineString': Polyline,
        'LineString': Polyline,
        'Polygon': Polygon,
        'MultiPolygon': Polygon,
        'MultiPoint': Multipoint,
        'Box': Envelope
    }
    if struct['type'] == "Feature":
        # BUGFIX: recurse into the feature's 'geometry' member. Recursing on
        # the Feature dict itself (as before) never terminated, because its
        # 'type' stays "Feature" on every call.
        return fromGeoJson(struct['geometry'], struct.get('properties', None))
    elif struct['type'] == "FeatureCollection":
        sr = None
        if 'crs' in struct:
            sr = SpatialReference(struct['crs']['properties']['code'])
            members = map(fromGeoJson, struct['members'])
            for member in members:
                member.spatialReference = sr
            return members
        else:
            return map(fromGeoJson, struct['members'])
    elif struct['type'] in type_map and hasattr(type_map[struct['type']],
                                                'fromGeoJson'):
        instances = type_map[struct['type']].fromGeoJson(struct)
        i = []
        assert instances is not None, "GeoJson conversion returned a Null geom"
        for instance in instances:
            if 'properties' in struct:
                instance.attributes = struct['properties'].copy()
                # An embedded '@esri.sr' property carries the spatial
                # reference; hoist it onto the instance.
                if '@esri.sr' in instance.attributes:
                    instance.spatialReference = SpatialReference.fromJson(
                        instance.attributes['@esri.sr'])
                    del instance.attributes['@esri.sr']
            if attributes:
                if not hasattr(instance, 'attributes'):
                    instance.attributes = {}
                for k, v in attributes.iteritems():
                    instance.attributes[k] = v
            i.append(instance)
        if i:
            # Single geometry unwrapped; multiple returned as a list.
            if len(i) > 1:
                return i
            return i[0]
    raise ValueError("Unconvertible to geometry")
"resource": ""
} |
def contains(self, pt):
    "Tests if the provided point is in the polygon."
    # Accept either a Point instance or a plain (x, y) pair.
    if isinstance(pt, Point):
        ptx, pty = pt.x, pt.y
        # Only compare spatial references when both sides actually have one.
        assert (self.spatialReference is None or \
                self.spatialReference.wkid is None) or \
               (pt.spatialReference is None or \
                pt.spatialReference.wkid is None) or \
               self.spatialReference == pt.spatialReference, \
               "Spatial references do not match."
    else:
        ptx, pty = pt
    in_shape = False
    # Ported nearly line-for-line from the Javascript API
    # Ray-casting test: toggle in_shape once per ring edge crossed.
    for ring in self._json_rings:
        for idx in range(len(ring)):
            # idxp1 wraps around so the last vertex pairs with the first.
            idxp1 = idx + 1
            if idxp1 >= len(ring):
                idxp1 -= len(ring)
            pi, pj = ring[idx], ring[idxp1]
            # Divide-by-zero checks
            # NOTE(review): the second degenerate branch below compares pty
            # (a y value) against x coordinates (pi[0]/pj[0]); this looks
            # like a transcription slip from the JS source -- confirm
            # against the upstream implementation before relying on edge
            # cases with vertical/horizontal edges.
            if (pi[1] == pj[1]) and pty >= min((pi[1], pj[1])):
                if ptx >= max((pi[0], pj[0])):
                    in_shape = not in_shape
            elif (pi[0] == pj[0]) and pty >= min((pi[0], pj[0])):
                if ptx >= max((pi[1], pj[1])):
                    in_shape = not in_shape
            elif (((pi[1] < pty and pj[1] >= pty) or
                   (pj[1] < pty and pi[1] >= pty)) and
                  (pi[0] + (pty - pi[1]) /
                   (pj[1] - pi[1]) * (pj[0] - pi[0]) < ptx)):
                in_shape = not in_shape
    return in_shape
"resource": ""
} |
def scan(self, concurrency=1):
    """Scan the applications installed on the disk and yield vulnerable ones.

    Each installed application is checked against the CVE DB; *concurrency*
    bounds the number of concurrent DB queries.

    Yields one VulnApp(name, version, vulnerabilities) namedtuple per
    vulnerable application, where each Vulnerability carries the CVE id and
    a brief summary of the issue.
    """
    self.logger.debug("Scanning FS content.")
    with ThreadPoolExecutor(max_workers=concurrency) as executor:
        reports = executor.map(self.query_vulnerabilities, self.applications())
        for application, candidates in reports:
            confirmed = list(lookup_vulnerabilities(application.version,
                                                    candidates))
            if confirmed:
                yield VulnApp(application.name,
                              application.version,
                              confirmed)
"resource": ""
} |
q42782 | execute_ping | train | def execute_ping(host_list, remote_user, remote_pass,
sudo=False, sudo_user=None, sudo_pass=None):
'''
Execute ls on some hosts
'''
runner = spam.ansirunner.AnsibleRunner()
result, failed_hosts = runner.ansible_perform_operation(
host_list=host_list,
remote_user=remote_user,
remote_pass=remote_pass,
sudo=sudo,
sudo_pass=sudo_pass,
sudo_user=sudo_user,
module="ping")
print result, failed_hosts
dark_hosts = runner.ansible_get_dark_hosts(result)
print "dark hosts: ", dark_hosts | python | {
"resource": ""
} |
q42783 | execute_ls | train | def execute_ls(host_list, remote_user, remote_pass):
'''
Execute any adhoc command on the hosts.
'''
runner = spam.ansirunner.AnsibleRunner()
result, failed_hosts = runner.ansible_perform_operation(
host_list=host_list,
remote_user=remote_user,
remote_pass=remote_pass,
module="command",
module_args="ls -1")
print "Result: ", result | python | {
"resource": ""
} |
def compiler_preprocessor_verbose(compiler, extraflags):
    """Capture the compiler preprocessor stage in verbose mode.

    Runs ``<compiler> -E <extraflags>... - -v`` with an empty stdin and
    returns the process's stderr (where the verbose search-path banner is
    printed) decoded as UTF-8 and split into a list of lines.
    """
    cmd = [compiler, '-E']
    cmd += extraflags
    cmd += ['-', '-v']
    with open(os.devnull, 'r') as devnull:
        p = Popen(cmd, stdin=devnull, stdout=PIPE, stderr=PIPE)
        # communicate() drains stdout and stderr concurrently and closes the
        # pipes; the previous wait()-then-read() pattern could deadlock once
        # a pipe buffer filled, and leaked the stderr file object.
        _, stderr = p.communicate()
    return stderr.decode('utf-8').splitlines()
"resource": ""
} |
def get_val(self, x):
    """Fetch this column's value from row *x*, coerced to the subtype.

    Integer subtypes are rounded to the nearest int; NaN (for non-integer
    subtypes) and unconvertible values fall back to ``self.default_val``.
    """
    try:
        value = x[self.col_name]
        if self.subtype == 'integer':
            return int(round(value))
        return self.default_val if np.isnan(value) else value
    except (ValueError, TypeError):
        return self.default_val
"resource": ""
} |
def safe_round(self, x):
    """Return this column's value with infinities clamped and NaN defaulted.

    Positive/negative infinity are clamped to +/- sys.maxsize and NaN is
    replaced with ``self.default_val`` before any rounding happens; integer
    subtypes are then rounded to the nearest int.
    """
    value = x[self.col_name]
    if np.isposinf(value):
        value = sys.maxsize
    elif np.isneginf(value):
        value = -sys.maxsize
    if np.isnan(value):
        value = self.default_val
    return int(round(value)) if self.subtype == 'integer' else value
"resource": ""
} |
def rados_df(self,
             host_list=None,
             remote_user=None,
             remote_pass=None):
    '''
    Run "rados df" on the given hosts and return the parsed output.
    '''
    raw_result, _failed_hosts = self.runner.ansible_perform_operation(
        host_list=host_list,
        remote_user=remote_user,
        remote_pass=remote_pass,
        module="command",
        module_args="rados df")
    return self.rados_parse_df(raw_result)
"resource": ""
} |
q42788 | Rados.rados_parse_df | train | def rados_parse_df(self,
result):
'''
Parse the result from ansirunner module and save it as a json
object
'''
parsed_results = []
HEADING = r".*(pool name) *(category) *(KB) *(objects) *(clones)" + \
" *(degraded) *(unfound) *(rd) *(rd KB) *(wr) *(wr KB)"
HEADING_RE = re.compile(HEADING,
re.IGNORECASE)
dict_keys = ["pool_name", "category", "size_kb", "objects",
"clones", "degraded", "unfound", "rd", "rd_kb",
"wr", "wr_kb"]
if result['contacted'].keys():
for node in result['contacted'].keys():
df_result = {}
nodeobj = result['contacted'][node]
df_output = nodeobj['stdout']
for line in df_output.splitlines():
print "Line: ", line
# Skip the heading line.
reobj = HEADING_RE.match(line)
if not reobj:
row = line.split()
if len(row) != len(dict_keys):
print "line not match: ", line
continue
key_count = 0
for column in row:
df_result[dict_keys[key_count]] = column
key_count += 1
print "df_result: ", df_result
parsed_results.append(df_result)
nodeobj['parsed_results'] = parsed_results
return result | python | {
"resource": ""
} |
def update_roles_gce(use_cache=True, cache_expiration=86400, cache_path="~/.gcetools/instances", group_name=None, region=None, zone=None):
    """
    Dynamically update fabric's roles by using assigning the tags associated with
    each machine in Google Compute Engine.
    use_cache - will store a local cache in ~/.gcetools/
    cache_expiration - cache expiration in seconds (default: 1 day)
    cache_path - the path to store instances data (default: ~/.gcetools/instances)
    group_name - optional managed instance group to use instead of the global instance pool
    region - gce region name (such as `us-central1`) for a regional managed instance group
    zone - gce zone name (such as `us-central1-a`) for a zone managed instance group
    How to use:
    - Call 'update_roles_gce' at the end of your fabfile.py (it will run each
    time you run fabric).
    - On each function use the regular @roles decorator and set the role to the name
    of one of the tags associated with the instances you wish to work with
    """
    # Fetch instance data (from cache when fresh) and map tags -> hosts.
    data = _get_data(use_cache, cache_expiration, group_name=group_name, region=region, zone=zone)
    roles = _get_roles(data)
    env.roledefs.update(roles)
    # NOTE(review): this binds a *local* name only -- without a ``global``
    # declaration it cannot update a module-level ``_data_loaded`` flag, so
    # the assignment has no effect outside this function; confirm intent.
    _data_loaded = True
    return INSTANCES_CACHE
"resource": ""
} |
def eventsource_connect(url, io_loop=None, callback=None, connect_timeout=None):
    """Client-side eventsource support.

    Takes a url (or a prepared ``httpclient.HTTPRequest``) and returns a
    Future whose result is a `EventSourceClient`.

    url -- target URL string or an ``httpclient.HTTPRequest`` instance
    io_loop -- IOLoop to use (defaults to ``IOLoop.current()``)
    callback -- optional function scheduled with the connect future
    connect_timeout -- connection timeout; only valid when *url* is a string
    """
    if io_loop is None:
        io_loop = IOLoop.current()
    if isinstance(url, httpclient.HTTPRequest):
        # A prepared request already carries its own timeout configuration.
        assert connect_timeout is None
        request = url
        # Copy and convert the headers dict/object (see comments in
        # AsyncHTTPClient.fetch)
        request.headers = httputil.HTTPHeaders(request.headers)
    else:
        # Server-sent events require an uncompressed (identity) stream.
        request = httpclient.HTTPRequest(
            url,
            connect_timeout=connect_timeout,
            headers=httputil.HTTPHeaders({
                "Accept-Encoding": "identity"
            })
        )
    # Fill in Tornado's default values for any unset request options.
    # NOTE(review): ``httpclient._RequestProxy`` is a private Tornado API
    # and may change between releases.
    request = httpclient._RequestProxy(
        request, httpclient.HTTPRequest._DEFAULTS)
    conn = EventSourceClient(io_loop, request)
    if callback is not None:
        io_loop.add_future(conn.connect_future, callback)
    return conn.connect_future
"resource": ""
} |
def printout(*args, **kwargs):
    """
    Print wrapper with extra keyword options for terminal formatting.

    Recognized keyword options (removed before delegating to print):
    color -- dict forwarded to colorize() (fg/bg palette indices)
    style -- dict forwarded to stylize() (bold/underline flags)
    prefix -- string prepended to each argument via prefix()
    suffix -- string appended to each argument
    indent -- number of spaces of indentation
    """
    color_opts = kwargs.pop('color', {})
    style_opts = kwargs.pop('style', {})
    line_prefix = kwargs.pop('prefix', '')
    suffix_text = kwargs.pop('suffix', '')
    spacing = kwargs.pop('indent', 0)
    decorated = []
    for arg in args:
        text = stylize(colorize(str(arg), **color_opts), **style_opts)
        text = indent(prefix(text, line_prefix), spacing)
        decorated.append(text + str(suffix_text))
    print(*decorated, **kwargs)
"resource": ""
} |
def colorize(txt, fg=None, bg=None):
    """
    Wrap *txt* in terminal escape codes selecting foreground/background
    palette colors, resetting the style afterwards.

    fg and bg are indices into the terminal color palette.
    """
    codes = []
    if fg:
        codes.append(_SET_FG.format(fg))
    if bg:
        codes.append(_SET_BG.format(bg))
    return ''.join(codes) + str(txt) + _STYLE_RESET
"resource": ""
} |
def stylize(txt, bold=False, underline=False):
    """
    Wrap *txt* in escape codes enabling bold/underline, resetting afterwards.
    """
    codes = []
    if bold is True:
        codes.append(_SET_BOLD)
    if underline is True:
        codes.append(_SET_UNDERLINE)
    return ''.join(codes) + str(txt) + _STYLE_RESET
"resource": ""
} |
def indent(txt, spacing=4):
    """
    Indent *txt* by prefixing it with *spacing* spaces (default 4).
    """
    # ' ' * spacing replaces the needless join-over-comprehension.
    return prefix(str(txt), ' ' * spacing)
"resource": ""
} |
def rgb(red, green, blue):
    """
    Return the palette index of a color in the 6x6x6 terminal color cube.

    Each of red, green and blue must be an integer in the range 0-5;
    ColorError is raised otherwise.
    """
    for component in (red, green, blue):
        # `not in range(6)` also rejects non-integers (e.g. 2.5), matching
        # the original membership-test semantics.
        if component not in range(6):
            raise ColorError('Value must be within 0-5, was {}.'.format(component))
    return 16 + (red * 36) + (green * 6) + blue
"resource": ""
} |
def isUTF8Strict(data):  # pragma: no cover - Only used when cchardet is missing.
    '''
    Return True when *data* decodes as UTF-8 and the decoded text contains
    no surrogate code points, False otherwise.
    '''
    try:
        decoded = data.decode('UTF-8')
    except UnicodeDecodeError:
        return False
    return not any(0xD800 <= ord(ch) <= 0xDFFF for ch in decoded)
"resource": ""
} |
def decode_headers(header_list):
    '''
    Decode a list of raw header bytestrings into unicode strings.

    Each header's character set is detected individually: cchardet when
    available (and confident enough), otherwise ASCII, then strict UTF-8,
    falling back to ISO-8859-1.
    '''
    decoded = []
    for raw in header_list:
        if cchardet:
            guess = cchardet.detect(raw)
            if guess and guess['confidence'] > 0.8:
                decoded.append(raw.decode(guess['encoding']))
            else:
                decoded.append(raw.decode('iso-8859-1'))
        else:  # pragma: no cover
            if all(byte & 0x80 == 0 for byte in raw):
                # All bytes are < 127 (e.g. ASCII)
                decoded.append(raw.decode("us-ascii"))
            elif isUTF8Strict(raw):
                decoded.append(raw.decode("utf-8"))
            else:
                decoded.append(raw.decode('iso-8859-1'))
    return decoded
"resource": ""
} |
def cd(dest):
    """Generator helper: temporarily change the working directory to *dest*,
    restoring the previous directory when the generator finishes."""
    previous = os.getcwd()
    try:
        os.chdir(dest)
        yield dest
    finally:
        os.chdir(previous)
"resource": ""
} |
def files(patterns,
          require_tags=("require",),
          include_tags=("include",),
          exclude_tags=("exclude",),
          root=".",
          always_exclude=("**/.git*", "**/.lfs*", "**/.c9*", "**/.~c9*")):
    """
    Takes a list of lib50._config.TaggedValue returns which files should be included and excluded from `root`.
    Any pattern tagged with a tag
        from include_tags will be included
        from require_tags can only be a file, that will then be included. MissingFilesError is raised if missing
        from exclude_tags will be excluded
    Any pattern in always_exclude will always be excluded.
    """
    # Work on mutable copies so the caller's tag tuples are untouched.
    require_tags = list(require_tags)
    include_tags = list(include_tags)
    exclude_tags = list(exclude_tags)
    # Ensure every tag starts with !
    for tags in [require_tags, include_tags, exclude_tags]:
        for i, tag in enumerate(tags):
            tags[i] = tag if tag.startswith("!") else "!" + tag
    with cd(root):
        # Include everything by default
        included = _glob("*")
        excluded = set()
        if patterns:
            missing_files = []
            # Per line in files
            # Patterns are processed in order, so later include/exclude
            # patterns override the effect of earlier ones.
            for pattern in patterns:
                # Include all files that are tagged with !require
                if pattern.tag in require_tags:
                    file = str(Path(pattern.value))
                    if not Path(file).exists():
                        missing_files.append(file)
                    else:
                        # NOTE(review): the try/else below adds the file to
                        # `included` only when it was previously excluded
                        # (remove() succeeded); a required file absent from
                        # both sets is never added -- confirm this is the
                        # intended semantics.
                        try:
                            excluded.remove(file)
                        except KeyError:
                            pass
                        else:
                            included.add(file)
                # Include all files that are tagged with !include
                elif pattern.tag in include_tags:
                    new_included = _glob(pattern.value)
                    excluded -= new_included
                    included.update(new_included)
                # Exclude all files that are tagged with !exclude
                elif pattern.tag in exclude_tags:
                    new_excluded = _glob(pattern.value)
                    included -= new_excluded
                    excluded.update(new_excluded)
            if missing_files:
                raise MissingFilesError(missing_files)
        # Exclude all files that match a pattern from always_exclude
        for line in always_exclude:
            included -= _glob(line)
        # Exclude any files that are not valid utf8
        invalid = set()
        for file in included:
            try:
                file.encode("utf8")
            except UnicodeEncodeError:
                # Keep a lossy (replacement-char) rendering in `excluded`
                # so the name can still be reported to the user.
                excluded.add(file.encode("utf8", "replace").decode())
                invalid.add(file)
        included -= invalid
    return included, excluded
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.