_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q43100 | Client.frog_tip | train | def frog_tip(self):
"""\
Return a single FROG tip.
"""
# NOTE(review): indentation was flattened by the dataset dump; only
# comments are added here, code bytes are untouched.
cache = self._cache
client = self._client
# Refill the local cache from the remote client when it is stale.
if self.should_refresh:
tips = client.croak()
for number, tip in tips.items():
cache[str(number)] = tip
choice = random.choice(list(cache.keys()))
# We'll get a bytes() object here during real usage
# but a text-like object in the tests. Good job Python
try:
tip = cache[choice].decode()
except AttributeError:
tip = cache[choice]
# Each tip is served at most once: drop it after selection.
del cache[choice]
return tip | python | {
"resource": ""
} |
q43101 | cli | train | def cli(dirty, stash):
"""
This is a tool that handles all the tasks to build a Python application
This tool is installed as a setuptools entry point, which means it should be accessible from your terminal once
this application is installed in develop mode.
"""
_setup_logging()
LOGGER.info('EPAB %s', __version__)
LOGGER.info('Running in %s', os.getcwd())
# Attach the repository helper to the shared context and make sure the
# repo is in a usable state before doing anything else.
CTX.repo = epab.utils.Repo()
CTX.repo.ensure()
CTX.stash = stash
for filename in _GIT_IGNORE:
epab.utils.add_to_gitignore(filename)
# Refuse to run on a dirty working tree unless the caller explicitly
# allowed it via the `dirty` flag.
if not dirty and CTX.repo.is_dirty():
LOGGER.error('Repository is dirty')
sys.exit(-1) | python | {
"resource": ""
} |
q43102 | _handle_response | train | def _handle_response(response, command, id_xpath='./id', **kwargs):
""" Initialize the corect Response object from the response string based on the API command type. """
# Maps an API command name to the Response subclass that knows how to
# parse that command's reply.
_response_switch = {
'insert': ModifyResponse,
'replace': ModifyResponse,
'partial-replace': ModifyResponse,
'update': ModifyResponse,
'delete': ModifyResponse,
'search-delete': SearchDeleteResponse,
'reindex': Response,
'backup': Response,
'restore': Response,
'clear': Response,
'status': StatusResponse,
'search': SearchResponse,
'retrieve': ListResponse,
'similar': ListResponse,
'lookup': LookupResponse,
'alternatives': AlternativesResponse,
'list-words': WordsResponse,
'list-first': ListResponse,
'list-last': ListResponse,
'retrieve-last': ListResponse,
'retrieve-first': ListResponse,
'show-history': None,
'list-paths': ListPathsResponse,
'list-facets': ListFacetsResponse}
# Unknown commands fall back to the generic Response class.
# NOTE(review): despite its name, `request_class` holds a *response* class.
try:
request_class = _response_switch[command]
except KeyError:
request_class = Response
return request_class(response, id_xpath, **kwargs) | python | {
"resource": ""
} |
q43103 | Response._parse_for_errors | train | def _parse_for_errors(self):
""" Look for an error tag and raise APIError for fatal errors or APIWarning for nonfatal ones. """
error = self._response.find('{www.clusterpoint.com}error')
if error is not None:
# Fatal severities abort via APIError; anything else is surfaced as a
# non-fatal APIWarning through the warnings machinery.
if error.find('level').text.lower() in ('rejected', 'failed', 'error', 'fatal'):
raise APIError(error)
else:
warnings.warn(APIWarning(error)) | python | {
"resource": ""
} |
q43104 | Response.get_content_string | train | def get_content_string(self):
""" Ge thet Clusterpoint response's content as a string. """
# NOTE(review): Python 2 code -- under Python 3, ET.tostring with
# encoding="utf-8" would return bytes and ''.join would fail.
return ''.join([ET.tostring(element, encoding="utf-8", method="xml")
for element in list(self._content)]) | python | {
"resource": ""
} |
q43105 | Response.get_content_field | train | def get_content_field(self, name):
""" Get the contents of a specific subtag from Clusterpoint Storage's response's content tag.
Args:
name -- A name string of the content's subtag to be returned.
Returns:
A dict representing the contents of the specified field or a list of dicts
if there are multiple fields with that tag name. Returns None if no field found.
"""
fields = self._content.findall(name)
if not fields:
return None
elif len(fields) == 1:
# Single match: unwrap and return just that field's dict.
return etree_to_dict(fields[0])[name]
else:
# Multiple matches: return one dict per occurrence, in document order.
return [etree_to_dict(field)[name] for field in fields] | python | {
"resource": ""
} |
q43106 | ListResponse.get_documents | train | def get_documents(self, doc_format='dict'):
""" Get the documents returned from Storege in this response.
Keyword args:
doc_format -- Specifies the doc_format for the returned documents.
Can be 'dict', 'etree' or 'string'. Default is 'dict'.
Also accepts 'list-etree' and 'list-string', which return a
list (not keyed by document id) of etree Elements / XML strings.
Returns:
A dict where keys are document ids and values depending of the required doc_format:
A dict representations of documents (see etree_to_dict());
A etree Element representing the document;
A raw XML document string.
Raises:
ParameterError -- The doc_format value is not allowed.
"""
# Walk `rel_path` (the pre-split id xpath) down from `root` and return
# the text of the final element, or None if any step is missing.
def get_doc_id(root, rel_path):
if not rel_path:
return root.text
else:
child = root.find(rel_path[0])
if child is None:
return None
return get_doc_id(child, rel_path[1:])
if doc_format == 'dict':
return dict([(get_doc_id(document, self._id_xpath), etree_to_dict(document)['document']) for
document in self._get_doc_list()])
elif doc_format == 'etree':
return dict([(get_doc_id(document, self._id_xpath), document) for
document in self._get_doc_list()])
elif doc_format == 'list-etree':
return self._get_doc_list()
elif doc_format == 'list-string':
return list([(ET.tostring(document)) for
document in self._get_doc_list()])
elif doc_format in ('', None, 'string'):
return dict([(get_doc_id(document, self._id_xpath), ET.tostring(document)) for
document in self._get_doc_list()])
else:
raise ParameterError("doc_format=" + doc_format) | python | {
"resource": ""
} |
q43107 | SearchResponse.get_aggregate | train | def get_aggregate(self):
""" Get aggregate data.
Returns:
A dict in with queries as keys and results as values.
"""
return dict([(aggregate.find('query').text, [(ET.tostring(data).lstrip('<data xmlns:cps="www.clusterpoint.com" xmlns:cpse="www.clusterpoint.com">').strip().rstrip("</data>")) for data in aggregate.findall('data')])
for aggregate in self._content.findall('aggregate')]) | python | {
"resource": ""
} |
q43108 | WordsResponse.get_words | train | def get_words(self):
""" Get words matching the request search terms.
Returns:
A dict in form:
{<search term>: {<matching word>: <number of times this word is found in the Storage>
} // Repeated for every matching word.
} // Repeated for every search term.
"""
# NOTE(review): word.attrib['count'] is returned as a string, not an int
# (XML attribute values are not converted) -- confirm callers expect that.
return dict([(word_list.attrib['to'], dict([(word.text, word.attrib['count'])
for word in word_list.findall('word')]))
for word_list in self._content.findall('list')]) | python | {
"resource": ""
} |
q43109 | AlternativesResponse.get_alternatives | train | def get_alternatives(self):
""" Get the spelling alternatives for search terms.
Returns:
A dict in form:
{<search term>: {'count': <number of times the searh term occurs in the Storage>,
'words': {<an alternative>: {'count': <number of times the alternative occurs in the Storage>,
'cr': <cr value of the alternative>,
'idif': <idif value of the alternative>,
'h': <h value of the alternative>}
} // Repeated for every alternative.
}
} // Repeated for every search term
"""
# 'count' is converted to int; the per-word attrib dicts are passed
# through as-is (string values straight from the XML attributes).
return dict([(alternatives.find('to').text,
{'count': int(alternatives.find('count').text),
'words': dict([(word.text, word.attrib)
for word in alternatives.findall('word')])})
for alternatives in
self._content.find('alternatives_list').findall('alternatives')]) | python | {
"resource": ""
} |
q43110 | ListFacetsResponse.get_facets | train | def get_facets(self):
""" Get facets from the response.
Returns:
A dict where requested facet paths are keys and a list of coresponding terms are values.
"""
# One entry per <facet> element, keyed by its 'path' attribute.
return dict([(facet.attrib['path'], [term.text
for term in facet.findall('term')])
for facet in self._content.findall('facet')]) | python | {
"resource": ""
} |
q43111 | HttpClient._process_response | train | def _process_response(self):
"""Return a JSON result after an HTTP Request.
Process the response of an HTTP Request and make it a JSON error if
it failed. Otherwise return the response's content.
"""
response = self.conn.getresponse()
# NOTE(review): the return type is inconsistent -- the raw response body
# on 200/201, but a Python dict (not serialized JSON) on error.  Callers
# must handle both shapes.
if response.status == 200 or response.status == 201:
data = response.read()
else:
data = {
"error": {
"code": response.status,
"reason": response.reason,
"data": response.read()
}
}
return data | python | {
"resource": ""
} |
q43112 | HttpClient.post | train | def post(self, url, data):
"""Send a HTTP POST request to a URL and return the result.
Args:
url -- request path on the already-open connection.
data -- urlencoded form body.
"""
headers = {
"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/json"
}
self.conn.request("POST", url, data, headers)
return self._process_response() | python | {
"resource": ""
} |
q43113 | HttpClient.put | train | def put(self, url, data=None):
"""Send a HTTP PUT request to a URL and return the result.
Unlike post(), no Content-type/Accept headers are set here.
"""
self.conn.request("PUT", url, data)
return self._process_response() | python | {
"resource": ""
} |
q43114 | ServerAbstract.nginx_web_ssl_config | train | def nginx_web_ssl_config(self):
"""
Nginx web ssl config
Returns the SSL web-server config template rendered with this
server's web directory and SSL directory.
"""
# The template consumes `dt` by index: dt[0] = web dir, dt[1] = ssl dir.
dt = [self.nginx_web_dir, self.nginx_ssl_dir]
return nginx_conf_string.simple_ssl_web_conf.format(dt=dt) | python | {
"resource": ""
} |
q43115 | extract_feature_base | train | def extract_feature_base(dbpath, folder_path, set_object, extractor, force_extraction=False, verbose=0,
add_args=None, custom_name=None):
"""
Generic function which extracts a feature and stores it in the database
Parameters
----------
dbpath : string, path to SQLite database file
folder_path : string, path to folder where the files are stored
set_object : object (either TestSet or TrainSet) which is stored in the database
extractor : function, which takes the path of a data point and *args as parameters and returns a feature
force_extraction : boolean, if True - will re-extract feature even if a feature with this name already
exists in the database, otherwise, will only extract if the feature doesn't exist in the database.
default value: False
verbose : int, if bigger than 0, will print the current number of the file for which data is being extracted
ever verbose steps (for example, verbose=500 will print 0, 500, 1000 etc.). default value: 0
add_args : optional arguments for the extractor (list/dictionary/tuple/whatever). if None, the
extractor should take only one input argument - the file path. default value: None
custom_name : string, optional name for the feature (it will be stored in the database with the custom_name
instead of extractor function name). if None, the extractor function name will be used. default value: None
Returns
-------
None
"""
# NOTE(review): this module is Python 2 (`print a` statements below).
if custom_name is None:
extractor_name = extractor.__name__
else:
extractor_name = custom_name
engine = create_engine('sqlite:////' + dbpath)
session_cl = sessionmaker(bind=engine)
session = session_cl()
a = 0
# Only row #1 is inspected to decide whether the feature already exists;
# this assumes every row shares the same feature set.
tmp_object = session.query(set_object).get(1)
if tmp_object.features is None:
# First feature ever: create the features mapping on each row.
for i in session.query(set_object).order_by(set_object.id):
if add_args is None:
i.features = {extractor_name: extractor(join(folder_path, i.path))}
else:
i.features = {extractor_name: extractor(join(folder_path, i.path), add_args)}
if verbose > 0:
if a % verbose == 0:
print a
a += 1
elif (extractor_name not in tmp_object.features) or force_extraction is True:
# Feature missing (or re-extraction forced): add/overwrite it per row.
for i in session.query(set_object).order_by(set_object.id):
if add_args is None:
i.features[extractor_name] = extractor(join(folder_path, i.path))
else:
i.features[extractor_name] = extractor(join(folder_path, i.path), add_args)
if verbose > 0:
if a % verbose == 0:
print a
a += 1
session.commit()
session.close()
return None | python | {
"resource": ""
} |
q43116 | return_features_base | train | def return_features_base(dbpath, set_object, names):
"""
Generic function which returns a list of extracted features from the database
Parameters
----------
dbpath : string, path to SQLite database file
set_object : object (either TestSet or TrainSet) which is stored in the database
names : list of strings, a list of feature names which are to be retrieved from the database, if equal to 'all',
all features will be returned
Returns
-------
return_list : list of lists, each 'inside list' corresponds to a single data point, each element of the 'inside
list' is a feature (can be of any type)
"""
# NOTE(review): unlike most siblings in this module, the session is never
# closed here -- confirm whether that is intentional.
engine = create_engine('sqlite:////' + dbpath)
session_cl = sessionmaker(bind=engine)
session = session_cl()
return_list = []
if names == 'all':
for i in session.query(set_object).order_by(set_object.id):
row_list = []
for feature in i.features:
row_list.append(i.features[feature])
return_list.append(row_list[:])
else:
for i in session.query(set_object).order_by(set_object.id):
row_list = []
for feature in i.features:
if feature in names:
row_list.append(i.features[feature])
return_list.append(row_list[:])
return return_list | python | {
"resource": ""
} |
q43117 | return_features_numpy_base | train | def return_features_numpy_base(dbpath, set_object, points_amt, names):
"""
Generic function which returns a 2d numpy array of extracted features
Parameters
----------
dbpath : string, path to SQLite database file
set_object : object (either TestSet or TrainSet) which is stored in the database
points_amt : int, number of data points in the database
names : list of strings, a list of feature names which are to be retrieved from the database, if equal to 'all',
all features will be returned
Returns
-------
return_array : ndarray of features, each row corresponds to a single datapoint. If a single feature
is a 1d numpy array, then it will be unrolled into the resulting array. Higher-dimensional numpy arrays are not
supported.
"""
engine = create_engine('sqlite:////' + dbpath)
session_cl = sessionmaker(bind=engine)
session = session_cl()
# Row #1 is used as the template to size the output; this assumes all
# rows share the same features with the same array lengths.
tmp_object = session.query(set_object).get(1)
if names == 'all':
# First pass: count output columns (1d arrays are unrolled).
columns_amt = 0
for feature in tmp_object.features:
if type(tmp_object.features[feature]) is np.ndarray:
columns_amt += tmp_object.features[feature].shape[0]
else:
columns_amt += 1
return_array = np.zeros([points_amt, columns_amt])
# Second pass: fill one row per data point, feature by feature.
for i in enumerate(session.query(set_object).order_by(set_object.id)):
counter = 0
for feature in i[1].features:
feature_val = i[1].features[feature]
if type(feature_val) is np.ndarray:
columns_amt = feature_val.shape[0]
return_array[i[0], counter:counter + columns_amt] = feature_val[:]
counter += feature_val.shape[0]
else:
return_array[i[0], counter] = feature_val
counter += 1
else:
# Same two-pass scheme, restricted to the requested feature names.
columns_amt = 0
for feature in tmp_object.features:
if feature in names:
if type(tmp_object.features[feature]) is np.ndarray:
columns_amt += tmp_object.features[feature].shape[0]
else:
columns_amt += 1
return_array = np.zeros([points_amt, columns_amt])
for i in enumerate(session.query(set_object).order_by(set_object.id)):
counter = 0
for feature in i[1].features:
if feature in names:
feature_val = i[1].features[feature]
if type(feature_val) is np.ndarray:
columns_amt = feature_val.shape[0]
return_array[i[0], counter:counter + columns_amt] = feature_val[:]
counter += feature_val.shape[0]
else:
return_array[i[0], counter] = feature_val
counter += 1
session.close()
return return_array | python | {
"resource": ""
} |
q43118 | return_real_id_base | train | def return_real_id_base(dbpath, set_object):
"""
Generic function which returns a list of real_id's
Parameters
----------
dbpath : string, path to SQLite database file
set_object : object (either TestSet or TrainSet) which is stored in the database
Returns
-------
return_list : list of real_id values for the dataset (a real_id is the filename minus the suffix and prefix)
"""
engine = create_engine('sqlite:////' + dbpath)
session_cl = sessionmaker(bind=engine)
session = session_cl()
return_list = []
# Collect real_id in primary-key order so it lines up with the other
# per-row accessors in this module.
for i in session.query(set_object).order_by(set_object.id):
return_list.append(i.real_id)
session.close()
return return_list | python | {
"resource": ""
} |
q43119 | return_feature_list_base | train | def return_feature_list_base(dbpath, set_object):
"""
Generic function which returns a list of the names of all available features
Parameters
----------
dbpath : string, path to SQLite database file
set_object : object (either TestSet or TrainSet) which is stored in the database
Returns
-------
return_list : list of strings corresponding to all available features
"""
engine = create_engine('sqlite:////' + dbpath)
session_cl = sessionmaker(bind=engine)
session = session_cl()
return_list = []
# Only row #1 is inspected -- assumes all rows share the same feature set.
tmp_object = session.query(set_object).get(1)
for feature in tmp_object.features:
return_list.append(feature)
session.close()
return return_list | python | {
"resource": ""
} |
q43120 | return_single_real_id_base | train | def return_single_real_id_base(dbpath, set_object, object_id):
"""
Generic function which returns a real_id string of an object specified by the object_id
Parameters
----------
dbpath : string, path to SQLite database file
set_object : object (either TestSet or TrainSet) which is stored in the database
object_id : int, id of object in database
Returns
-------
real_id : string
"""
engine = create_engine('sqlite:////' + dbpath)
session_cl = sessionmaker(bind=engine)
session = session_cl()
tmp_object = session.query(set_object).get(object_id)
# NOTE(review): the attribute is read after session.close(); this works
# only because the column was already loaded -- confirm expire_on_commit
# settings before changing session handling.
session.close()
return tmp_object.real_id | python | {
"resource": ""
} |
q43121 | return_single_features_base | train | def return_single_features_base(dbpath, set_object, object_id):
"""
Generic function which returns the features of an object specified by the object_id
Parameters
----------
dbpath : string, path to SQLite database file
set_object : object (either TestSet or TrainSet) which is stored in the database
object_id : int, id of object in database
Returns
-------
features : dict containing the features
"""
engine = create_engine('sqlite:////' + dbpath)
session_cl = sessionmaker(bind=engine)
session = session_cl()
tmp_object = session.query(set_object).get(object_id)
# NOTE(review): `features` is accessed after session.close(); relies on
# the attribute having been loaded before the session was closed.
session.close()
return tmp_object.features | python | {
"resource": ""
} |
q43122 | return_single_convert_numpy_base | train | def return_single_convert_numpy_base(dbpath, folder_path, set_object, object_id, converter, add_args=None):
"""
Generic function which converts an object specified by the object_id into a numpy array and returns the array,
the conversion is done by the 'converter' function
Parameters
----------
dbpath : string, path to SQLite database file
folder_path : string, path to folder where the files are stored
set_object : object (either TestSet or TrainSet) which is stored in the database
object_id : int, id of object in database
converter : function, which takes the path of a data point and *args as parameters and returns a numpy array
add_args : optional arguments for the converter (list/dictionary/tuple/whatever). if None, the
converter should take only one input argument - the file path. default value: None
Returns
-------
result : ndarray
"""
engine = create_engine('sqlite:////' + dbpath)
session_cl = sessionmaker(bind=engine)
session = session_cl()
tmp_object = session.query(set_object).get(object_id)
session.close()
# Delegate the actual conversion to the caller-supplied converter,
# passing add_args only when one was given.
if add_args is None:
return converter(join(folder_path, tmp_object.path))
else:
return converter(join(folder_path, tmp_object.path), add_args) | python | {
"resource": ""
} |
q43123 | delete_feature_base | train | def delete_feature_base(dbpath, set_object, name):
"""
Generic function which deletes a feature from a database
Parameters
----------
dbpath : string, path to SQLite database file
set_object : object (either TestSet or TrainSet) which is stored in the database
name : string, name of the feature to be deleted
Returns
-------
None
"""
engine = create_engine('sqlite:////' + dbpath)
session_cl = sessionmaker(bind=engine)
session = session_cl()
# Row #1 acts as the sentinel: the feature is deleted from every row only
# if it exists on the first row (assumes a uniform feature set).
tmp_object = session.query(set_object).get(1)
if tmp_object.features is not None and name in tmp_object.features:
for i in session.query(set_object).order_by(set_object.id):
del i.features[name]
session.commit()
session.close()
return None | python | {
"resource": ""
} |
q43124 | DataSetBase.extract_feature | train | def extract_feature(self, extractor, force_extraction=False, verbose=0, add_args=None, custom_name=None):
"""
Extracts a feature and stores it in the database
Parameters
----------
extractor : function, which takes the path of a data point and *args as parameters and returns a feature
force_extraction : boolean, if True - will re-extract feature even if a feature with this name already
exists in the database, otherwise, will only extract if the feature doesn't exist in the database.
default value: False
verbose : int, if bigger than 0, will print the current number of the file for which data is being extracted
add_args : optional arguments for the extractor (list/dictionary/tuple/whatever). if None, the
extractor should take only one input argument - the file path. default value: None
custom_name : string, optional name for the feature (it will be stored in the database with the custom_name
instead of extractor function name). if None, the extractor function name will be used. default value: None
Returns
-------
None
Raises
------
errors.EmptyDatabase -- if the database has not been populated yet.
"""
# Thin wrapper: validates the database state, then delegates to the
# module-level extract_feature_base().
if self._prepopulated is False:
raise errors.EmptyDatabase(self.dbpath)
else:
return extract_feature_base(self.dbpath, self.path_to_set, self._set_object, extractor, force_extraction,
verbose, add_args, custom_name) | python | {
"resource": ""
} |
q43125 | DataSetBase.extract_feature_dependent_feature | train | def extract_feature_dependent_feature(self, extractor, force_extraction=False, verbose=0, add_args=None,
custom_name=None):
"""
Extracts a feature which may be dependent on other features and stores it in the database
Parameters
----------
extractor : function, which takes the path of a data point, a dictionary of all other features and *args as
parameters and returns a feature
force_extraction : boolean, if True - will re-extract feature even if a feature with this name already
exists in the database, otherwise, will only extract if the feature doesn't exist in the database.
default value: False
verbose : int, if bigger than 0, will print the current number of the file for which data is being extracted
add_args : optional arguments for the extractor (list/dictionary/tuple/whatever). if None, the
extractor should take only one input argument - the file path. default value: None
custom_name : string, optional name for the feature (it will be stored in the database with the custom_name
instead of extractor function name). if None, the extractor function name will be used. default value: None
Returns
-------
None
Raises
------
errors.EmptyDatabase -- if the database has not been populated yet.
"""
# Thin wrapper around extract_feature_dependent_feature_base().
if self._prepopulated is False:
raise errors.EmptyDatabase(self.dbpath)
else:
return extract_feature_dependent_feature_base(self.dbpath, self.path_to_set, self._set_object, extractor,
force_extraction, verbose, add_args, custom_name) | python | {
"resource": ""
} |
q43126 | DataSetBase.return_features | train | def return_features(self, names='all'):
"""
Returns a list of extracted features from the database
Parameters
----------
names : list of strings, a list of feature names which are to be retrieved from the database, if equal
to 'all', the all features will be returned, default value: 'all'
Returns
-------
A list of lists, each 'inside list' corresponds to a single data point, each element of the 'inside list' is a
feature (can be of any type)
Raises
------
errors.EmptyDatabase -- if the database has not been populated yet.
"""
# Thin wrapper around return_features_base().
if self._prepopulated is False:
raise errors.EmptyDatabase(self.dbpath)
else:
return return_features_base(self.dbpath, self._set_object, names) | python | {
"resource": ""
} |
q43127 | DataSetBase.return_features_numpy | train | def return_features_numpy(self, names='all'):
"""
Returns a 2d numpy array of extracted features
Parameters
----------
names : list of strings, a list of feature names which are to be retrieved from the database, if equal to 'all',
all features will be returned, default value: 'all'
Returns
-------
A numpy array of features, each row corresponds to a single datapoint. If a single feature is a 1d numpy array,
then it will be unrolled into the resulting array. Higher-dimensional numpy arrays are not supported.
Raises
------
errors.EmptyDatabase -- if the database has not been populated yet.
"""
# Thin wrapper around return_features_numpy_base().
if self._prepopulated is False:
raise errors.EmptyDatabase(self.dbpath)
else:
return return_features_numpy_base(self.dbpath, self._set_object, self.points_amt, names) | python | {
"resource": ""
} |
q43128 | DataSetBase.return_real_id | train | def return_real_id(self):
"""
Returns a list of real_id's
Parameters
----------
Returns
-------
A list of real_id values for the dataset (a real_id is the filename minus the suffix and prefix)
Raises
------
errors.EmptyDatabase -- if the database has not been populated yet.
"""
# Thin wrapper around return_real_id_base().
if self._prepopulated is False:
raise errors.EmptyDatabase(self.dbpath)
else:
return return_real_id_base(self.dbpath, self._set_object) | python | {
"resource": ""
} |
q43129 | DataSetBase.return_single_convert_numpy | train | def return_single_convert_numpy(self, object_id, converter, add_args=None):
"""
Converts an object specified by the object_id into a numpy array and returns the array,
the conversion is done by the 'converter' function
Parameters
----------
object_id : int, id of object in database
converter : function, which takes the path of a data point and *args as parameters and returns a numpy array
add_args : optional arguments for the converter (list/dictionary/tuple/whatever). if None, the
converter should take only one input argument - the file path. default value: None
Returns
-------
result : ndarray
"""
# NOTE(review): unlike the other accessors on this class, this one does
# not guard against an unpopulated database -- confirm if intentional.
return return_single_convert_numpy_base(self.dbpath, self.path_to_set, self._set_object, object_id, converter,
add_args) | python | {
"resource": ""
} |
q43130 | LabeledDataSet.return_labels | train | def return_labels(self, original=False):
"""
Returns the labels of the dataset
Parameters
----------
original : if True, will return original labels, if False, will return transformed labels (as defined by
label_dict), default value: False
Returns
-------
A list of lists, each 'inside list' corresponds to a single data point, each element of the 'inside list' is a
label
Raises
------
errors.EmptyDatabase -- if the database has not been populated yet.
"""
if self._prepopulated is False:
raise errors.EmptyDatabase(self.dbpath)
else:
engine = create_engine('sqlite:////' + self.dbpath)
trainset.Base.metadata.create_all(engine)
session_cl = sessionmaker(bind=engine)
session = session_cl()
return_list = []
# Each row stores labels as {'original': [...], 'transformed': [...]};
# pick the requested variant and copy it.
for i in session.query(trainset.TrainSet).order_by(trainset.TrainSet.id):
if original is True:
row_list = i.labels['original']
else:
row_list = i.labels['transformed']
return_list.append(row_list[:])
session.close()
return return_list | python | {
"resource": ""
} |
q43131 | LabeledDataSet.return_labels_numpy | train | def return_labels_numpy(self, original=False):
"""
Returns a 2d numpy array of labels
Parameters
----------
original : if True, will return original labels, if False, will return transformed labels (as defined by
label_dict), default value: False
Returns
-------
A numpy array of labels, each row corresponds to a single datapoint
Raises
------
errors.EmptyDatabase -- if the database has not been populated yet.
"""
if self._prepopulated is False:
raise errors.EmptyDatabase(self.dbpath)
else:
engine = create_engine('sqlite:////' + self.dbpath)
trainset.Base.metadata.create_all(engine)
session_cl = sessionmaker(bind=engine)
session = session_cl()
# Row #1 fixes the number of label columns; assumes every row has the
# same number of labels.
tmp_object = session.query(trainset.TrainSet).get(1)
columns_amt = len(tmp_object.labels['original'])
return_array = np.zeros([self.points_amt, columns_amt])
for i in enumerate(session.query(trainset.TrainSet).order_by(trainset.TrainSet.id)):
if original is False:
return_array[i[0], :] = i[1].labels['transformed']
else:
return_array[i[0], :] = i[1].labels['original']
session.close()
return return_array | python | {
"resource": ""
} |
q43132 | LabeledDataSet.return_single_labels | train | def return_single_labels(self, object_id):
"""
Returns all labels for an object specified by the object_id
Parameters
----------
object_id : int, id of object in database
Returns
-------
result : list of labels
"""
engine = create_engine('sqlite:////' + self.dbpath)
trainset.Base.metadata.create_all(engine)
session_cl = sessionmaker(bind=engine)
session = session_cl()
tmp_object = session.query(trainset.TrainSet).get(object_id)
return tmp_object.labels | python | {
"resource": ""
} |
q43133 | CacheManager.path_in_cache | train | def path_in_cache(self, filename, metahash):
"""Generates the path to a file in the mh cache.
Returns the path only if the file actually exists in the cache.
Args:
filename: Filename relative to buildroot
metahash: hash object
Raises:
CacheMiss: if the generated path does not exist on disk.
"""
cpath = self._genpath(filename, metahash)
if os.path.exists(cpath):
return cpath
else:
raise CacheMiss | python | {
"resource": ""
} |
q43134 | CacheManager._genpath | train | def _genpath(self, filename, mhash):
"""Generate the path to a file in the cache.
Does not check to see if the file exists. Just constructs the path
where it should be.
Layout: <mh_cachedir>/<h[0:2]>/<h[2:4]>/<h>/<filename>, where h is the
hex digest of `mhash` (two-level fan-out to keep directories small).
"""
mhash = mhash.hexdigest()
return os.path.join(self.mh_cachedir, mhash[0:2], mhash[2:4],
mhash, filename) | python | {
"resource": ""
} |
q43135 | CacheManager.putfile | train | def putfile(self, filepath, buildroot, metahash):
"""Put a file in the cache.
Hard-links the file into the content-addressed object cache, then
hard-links that object into the metahash-addressed cache.
Args:
filepath: Path to file on disk.
buildroot: Path to buildroot
metahash: hash object
"""
# NOTE(review): `filename` is ignored -- the helper closes over
# `filepath` instead.  Harmless, but the parameter is dead.
def gen_obj_path(filename):
filehash = util.hash_file(filepath).hexdigest()
return filehash, os.path.join(self.obj_cachedir, filehash[0:2],
filehash[2:4], filehash)
filepath_relative = filepath.split(buildroot)[1][1:] # Strip leading /
# Path for the metahashed reference:
incachepath = self._genpath(filepath_relative, metahash)
filehash, obj_path = gen_obj_path(filepath)
if not os.path.exists(obj_path):
obj_dir = os.path.dirname(obj_path)
if not os.path.exists(obj_dir):
os.makedirs(obj_dir)
log.debug('Adding to obj cache: %s -> %s', filepath, obj_path)
os.link(filepath, obj_path)
# A pre-existing mh entry with a different checksum means the build is
# not reproducible; warn loudly and replace it.
if os.path.exists(incachepath):
existingfile_hash = util.hash_file(incachepath).hexdigest()
if filehash != existingfile_hash:
log.warn('File found in mh cache, but checksum differs. '
'Replacing with this new version. (File: %s)',
filepath)
log.warn('Possible reasons for this:')
log.warn(' 1. This build is not hermetic, and something '
'differs about the build environment compared to the '
'previous build.')
log.warn(' 2. This file has a timestamp or other build-time '
'related data encoded into it, which will always '
'cause the checksum to differ when built.')
log.warn(' 3. Everything is terrible and nothing works.')
os.unlink(incachepath)
if not os.path.exists(incachepath):
log.debug('Adding to mh cache: %s -> %s', filepath, incachepath)
if not os.path.exists(os.path.dirname(incachepath)):
os.makedirs(os.path.dirname(incachepath))
os.link(obj_path, incachepath) | python | {
"resource": ""
} |
q43136 | CacheManager.in_cache | train | def in_cache(self, objpath, metahash):
"""Returns true if object is cached.
Args:
objpath: Filename relative to buildroot.
metahash: hash object
"""
# EAFP: path_in_cache raises CacheMiss when the entry is absent.
try:
self.path_in_cache(objpath, metahash)
return True
except CacheMiss:
return False | python | {
q43137 | CacheManager.get_obj | train | def get_obj(self, objpath, metahash, dst_path):
"""Get object from cache, write it to dst_path.
Args:
objpath: filename relative to buildroot
(example: mini-boot/blahblah/somefile.bin)
metahash: metahash. See targets/base.py
dst_path: Absolute path where the file should be written.
Raises:
CacheMiss: if the item is not in the cache
"""
incachepath = self.path_in_cache(objpath, metahash)
if not os.path.exists(incachepath):
raise CacheMiss('%s not in cache.' % incachepath)
else:
log.debug('Cache hit! %s~%s', objpath, metahash.hexdigest())
if not os.path.exists(os.path.dirname(dst_path)):
os.makedirs(os.path.dirname(dst_path))
# Hard-link rather than copy: cache entries are immutable.
os.link(incachepath, dst_path) | python | {
"resource": ""
} |
q43138 | Herald._get_link | train | def _get_link(self, peer):
"""
Returns a link to the given peer
:param peer: A beans.Peer instance to find a link for
:return: A Link object
:raise ValueError: Unknown peer
"""
assert isinstance(peer, beans.Peer)
# Look for a link to the peer, using routers
# First router that can provide a link wins.
for router in self._routers:
link = router.get_link(peer)
if link:
return link
# Not found
raise ValueError("No link to peer {0}".format(peer)) | python | {
"resource": ""
} |
q43139 | Herald.send | train | def send(self, peer_id, message):
"""
Synchronously sends a message
:param peer_id: UUID of a peer
:param message: Message to send to the peer
:return: Whatever the underlying link's send() returns
:raise KeyError: Unknown peer
:raise ValueError: No link to the peer
"""
assert isinstance(message, beans.RawMessage)
# Get peer description (raises KeyError)
peer = self._directory.get_peer(peer_id)
# Get a link to the peer (raises ValueError)
link = self._get_link(peer)
assert isinstance(link, beans.AbstractLink)
# Call the link, and return its result
return link.send(message) | python | {
"resource": ""
} |
q43140 | change_default | train | def change_default(
kls,
key,
new_default,
new_converter=None,
new_reference_value=None,
):
"""return a new configman Option object that is a copy of an existing one,
giving the new one a different default value
kls -- class exposing get_required_config()
key -- name of the option to copy
new_default -- replacement default value
new_converter -- optional replacement from_string_converter
new_reference_value -- optional replacement reference_value_from
"""
# Copy so the class's original option definition is left untouched.
an_option = kls.get_required_config()[key].copy()
an_option.default = new_default
if new_converter:
an_option.from_string_converter = new_converter
if new_reference_value:
an_option.reference_value_from = new_reference_value
return an_option | python | {
"resource": ""
} |
q43141 | AntBuild.get_target | train | def get_target(self):
"""
Reads the android target based on project.properties file.
Returns
A string containing the project target (android-23 being the default if none is found)
"""
# Scan project.properties line by line; the first `target=...` wins.
with open('%s/project.properties' % self.path) as f:
for line in f.readlines():
matches = re.findall(r'^target=(.*)', line)
if len(matches) == 0:
continue
return matches[0].replace('\n', '')
# No target= line found: fall back to the configured SDK version.
return 'android-%s' % (config.sdk_version) | python | {
"resource": ""
} |
q43142 | Server.lookup | train | def lookup(self, name, host_override=None):
"""
Looks up a name from the DNSChain server. Throws exception if the
data is not valid JSON or if the namecoin entry does not exist in the
blockchain.
@param name: The name to lookup, e.g. 'id/dionyziz', note this $NAMESPACE/$NAME
format is not guaranteed. Additionally the caller must perform appropriate url
encoding _before_ the name is passed to urllib2.urlopen
@param host_override: optional Host header value to send instead of the default
"""
if host_override is not None:
self.headers['Host'] = host_override
full_url = "http://%s/%s" % (self.addr, name)
request = urllib2.Request(full_url, None, self.headers)
# NOTE(review): Python 2 exception syntax (`except ..., e:`) below.
try:
response = urllib2.urlopen(request)
except urllib2.HTTPError, e:
# 404 is wrapped in the domain-specific DataNotFound before being
# re-raised by the generic non-2xx check below.
if e.code == 404:
e = DataNotFound(e, name, self.headers['Host'])
if e.code < 200 or e.code > 299:
self._log.debug("%s" % (e.msg,), exc_info=True)
raise e
namecoin_string = response.read()
try:
data = json.loads(namecoin_string)
except ValueError:
raise MalformedJSON("%s\n%s" % (ValueError, namecoin_string))
return data | python | {
"resource": ""
} |
q43143 | cache_function | train | def cache_function(length):
"""
Caches a function, using the function itself as the key, and the return
value as the value saved. It passes all arguments on to the function, as
it should.
The decorator itself takes a length argument, which is the number of
seconds the cache will keep the result around.
It will put in a MethodNotFinishedError in the cache while the function is
processing. This should not matter in most cases, but if the app is using
threads, you won't be able to get the previous value, and will need to
wait until the function finishes. If this is not desired behavior, you can
remove the first two lines after the ``else``.
"""
def decorator(func):
def inner_func(*args, **kwargs):
from django.core.cache import cache
value = cache.get(func)
if func in cache:
return value
else:
# This will set a temporary value while ``func`` is being
# processed. When using threads, this is vital, as otherwise
# the function can be called several times before it finishes
# and is put into the cache.
class MethodNotFinishedError(Exception):
pass
cache.set(func, MethodNotFinishedError(
'The function %s has not finished processing yet. This \
value will be replaced when it finishes.' % (func.__name__)
), length)
result = func(*args, **kwargs)
cache.set(func, result, length)
return result
return inner_func
return decorator | python | {
"resource": ""
} |
q43144 | PluginClientEntryHookABC.angularFrontendAppDir | train | def angularFrontendAppDir(self) -> str:
""" Angular Frontend Dir
This directory will be linked into the angular app when it is compiled.
:return: The absolute path of the Angular2 app directory.
"""
relDir = self._packageCfg.config.plugin.title(require_string)
dir = os.path.join(self._pluginRoot, relDir)
if not os.path.isdir(dir): raise NotADirectoryError(dir)
return dir | python | {
"resource": ""
} |
q43145 | HTTPURI.put_content | train | def put_content(self, content):
"""
Makes a ``PUT`` request with the content in the body.
:raise: An :exc:`requests.RequestException` if it is not 2xx.
"""
r = requests.request(self.method if self.method else 'PUT', self.url, data=content, **self.storage_args)
if self.raise_for_status: r.raise_for_status() | python | {
"resource": ""
} |
q43146 | HTTPURI.dir_exists | train | def dir_exists(self):
"""
Makes a ``HEAD`` requests to the URI.
:returns: ``True`` if status code is 2xx.
"""
r = requests.request(self.method if self.method else 'HEAD', self.url, **self.storage_args)
try: r.raise_for_status()
except Exception: return False
return True | python | {
"resource": ""
} |
q43147 | SNSURI.put_content | train | def put_content(self, content):
"""
Publishes a message straight to SNS.
:param bytes content: raw bytes content to publish, will decode to ``UTF-8`` if string is detected
"""
if not isinstance(content, str):
content = content.decode('utf-8')
self.topic.publish(Message=content, **self.storage_args) | python | {
"resource": ""
} |
q43148 | registerExitCall | train | def registerExitCall():
r"""Registers an exit call to start the core.
The core would be started after the main module is loaded. Ec would be exited from the core.
"""
if state.isExitHooked:
return
state.isExitHooked = True
from atexit import register
register(core.start) | python | {
"resource": ""
} |
q43149 | generate_veq | train | def generate_veq(R=1.3, dR=0.1, Prot=6, dProt=0.1,nsamples=1e4,plot=False,
R_samples=None,Prot_samples=None):
""" Returns the mean and std equatorial velocity given R,dR,Prot,dProt
Assumes all distributions are normal. This will be used mainly for
testing purposes; I can use MC-generated v_eq distributions when we go for real.
"""
if R_samples is None:
R_samples = R*(1 + rand.normal(size=nsamples)*dR)
else:
inds = rand.randint(len(R_samples),size=nsamples)
R_samples = R_samples[inds]
if Prot_samples is None:
Prot_samples = Prot*(1 + rand.normal(size=nsamples)*dProt)
else:
inds = rand.randint(len(Prot_samples),size=nsamples)
Prot_samples = Prot_samples[inds]
veq_samples = 2*np.pi*R_samples*RSUN/(Prot_samples*DAY)/1e5
if plot:
plt.hist(veq_samples,histtype='step',color='k',bins=50,normed=True)
d = stats.norm(scale=veq_samples.std(),loc=veq_samples.mean())
vs = np.linspace(veq_samples.min(),veq_samples.max(),1e4)
plt.plot(vs,d.pdf(vs),'r')
return veq_samples.mean(),veq_samples.std() | python | {
"resource": ""
} |
q43150 | VirtualEnv.get_paths | train | def get_paths(self):
'''
get list of module paths
'''
# guess site package dir of virtualenv (system dependent)
venv_site_packages = '%s/lib/site-packages' % self.venv_dir
if not os.path.isdir(venv_site_packages):
venv_site_packages_glob = glob.glob('%s/lib/*/site-packages' % self.venv_dir)
if len(venv_site_packages_glob):
venv_site_packages = venv_site_packages_glob[0]
return [
self.venv_dir,
venv_site_packages
] | python | {
"resource": ""
} |
q43151 | VirtualEnv.create_virtualenv | train | def create_virtualenv(venv_dir, use_venv_module=True):
"""
creates a new virtualenv in venv_dir
By default, the built-in venv module is used.
On older versions of python, you may set use_venv_module to False to use virtualenv
"""
if not use_venv_module:
try:
check_call(['virtualenv', venv_dir, '--no-site-packages'])
except OSError:
raise Exception('You probably dont have virtualenv installed: sudo apt-get install python-virtualenv')
else:
check_call([sys.executable or 'python', '-m', 'venv', venv_dir]) | python | {
"resource": ""
} |
q43152 | data_dirpath | train | def data_dirpath(task=None, **kwargs):
"""Get the path of the corresponding data directory.
Parameters
----------
task : str, optional
The task for which datasets in the desired directory are used for. If
not given, a path for the corresponding task-agnostic directory is
returned.
**kwargs : extra keyword arguments
Extra keyword arguments, representing additional attributes of the
datasets, are used to generate additional sub-folders on the path.
For example, providing 'lang=en' will results in a path such as
'/barn_base_dir/regression/lang_en/mydataset.csv'. Hierarchy always
matches lexicographical order of keyword argument names, so 'lang=en'
and 'animal=dog' will result in a path such as
'barn_base_dir/task_name/animal_dof/lang_en/dset.csv'.
Returns
-------
str
The path to the desired dir.
"""
path = _base_dir()
if task:
path = os.path.join(path, _snail_case(task))
for k, v in sorted(kwargs.items()):
subdir_name = '{}_{}'.format(_snail_case(k), _snail_case(v))
path = os.path.join(path, subdir_name)
os.makedirs(path, exist_ok=True)
return path | python | {
"resource": ""
} |
q43153 | Resource.save | train | def save(self, *args, **kwargs):
"""
saves creates or updates current resource
returns new resource
"""
self._pre_save(*args, **kwargs)
response = self._save(*args, **kwargs)
response = self._post_save(response, *args, **kwargs)
return response | python | {
"resource": ""
} |
q43154 | Resource.load | train | def load(self, id, *args, **kwargs):
"""
loads a remote resource by id
"""
self._pre_load(id, *args, **kwargs)
response = self._load(id, *args, **kwargs)
response = self._post_load(response, *args, **kwargs)
return response | python | {
"resource": ""
} |
q43155 | Resource.delete | train | def delete(self, *args, **kwargs):
"""
deletes current resource
returns response from api
"""
self._pre_delete(*args, **kwargs)
response = self._delete(*args, **kwargs)
response = self._post_delete(response, *args, **kwargs)
return response | python | {
"resource": ""
} |
q43156 | BaseResource.to_json | train | def to_json(self):
"""
put the object to json and remove the internal stuff
salesking schema stores the type in the title
"""
data = json.dumps(self)
out = u'{"%s":%s}' % (self.schema['title'], data)
return out | python | {
"resource": ""
} |
q43157 | RemoteResource._do_api_call | train | def _do_api_call(self, call_type=u'', id = None):
"""
returns a response if it is a valid call
otherwise the corresponding error
"""
endpoint = None
url = None
# print "call_type %s" % (call_type)
if call_type == u'load':
endpoint = self.get_endpoint("self")
if id is None:
raise APIException("LOAD_IDNOTSET", "could not load object")
elif call_type == u'delete' or (call_type == u"destroy"):
endpoint = self.get_endpoint("destroy")
if id is None:
raise APIException("DELETE_IDNOTSET", "could not delete object")
elif call_type == u'update':
endpoint = self.get_endpoint("update")
if id is None:
raise APIException("UPDATE_IDNOTSET", "could not load object")
elif call_type == u'create':
endpoint = self.get_endpoint("create")
url = u"%s%s%s" % (self.__api__.base_url, API_BASE_PATH, endpoint['href'])
# post?
elif call_type == u'schema':
# add schema gethering functionality
# hackisch
endpoint = self.get_endpoint("create")
url = u"%s%s%s/schema" % (self.__api__.base_url, API_BASE_PATH, endpoint['href'])
endpoint['method'] = u'GET'
if id is not None:
url = u"%s%s%s" % (self.__api__.base_url, API_BASE_PATH, endpoint['href'].replace(u"{id}",id))
## excecute the api request
payload = self.to_json()
if u'method' in endpoint.keys():
method = endpoint['method']
else:
method = u'GET'
# request raises exceptions if something goes wrong
obj = None
try:
# dbg
msg = u"url: %s method:%s p: %s" % (url, method, payload)
#print msg
response = self.__api__.request(url, method, data=payload)
#load update create success
if ((response.status_code == 200 and
call_type in ['load', 'update']) or
(response.status_code == 201 and call_type == 'create')):
msg = "call_type: %s successfully completed" % call_type
log.info(msg)
return self.to_instance(response)
elif (response.status_code == 200 and call_type in ['delete', 'destroy']):
#delete success
msg ="call_type: %s successfully completed" % call_type
log.info(msg)
return self._try_to_serialize(response)
elif 200 <= response.status_code <= 299:
return self._try_to_serialize(response)
except Exception as e:
msg = "Exception occoured %s url: %s" % (e, url)
log.error(msg)
raise e | python | {
"resource": ""
} |
q43158 | open_logfile | train | def open_logfile(filename, mode='a'):
"""Open the named log file in append mode.
If the file already exists, a separator will also be printed to
the file to separate past activity from current activity.
"""
filename = os.path.expanduser(filename)
filename = os.path.abspath(filename)
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
exists = os.path.exists(filename)
log_fp = open(filename, mode)
if exists:
log_fp.write('%s\n' % ('-'*60))
log_fp.write('%s run on %s\n' % (sys.argv[0], time.strftime('%c')))
return log_fp | python | {
"resource": ""
} |
q43159 | ServiceBase.create | train | def create(self, resource, data):
'''
A base function that performs a default create POST request for a given object
'''
service_def, resource_def, path = self._get_service_information(
resource)
self._validate(resource, data)
return self.call(path=path, data=data, method='post') | python | {
"resource": ""
} |
q43160 | ServiceBase.update | train | def update(self, resource, resource_id, data):
'''
A base function that performs a default create PATCH request for a given object
'''
service_def, resource_def, path = self._get_service_information(
resource)
update_path = "{0}{1}/" . format(path, resource_id)
return self.call(path=update_path, data=data, method='patch') | python | {
"resource": ""
} |
q43161 | ServiceBase.delete | train | def delete(self, resource, resource_id):
'''
A base function that performs a default delete DELETE request for a given object
'''
service_def, resource_def, path = self._get_service_information(
resource)
delete_path = "{0}{1}/" . format(path, resource_id)
return self.call(path=delete_path, method="delete") | python | {
"resource": ""
} |
q43162 | ServiceBase._make_api | train | def _make_api(self, service_name):
'''
not yet in use ..
'''
resources = [resource for resource, resource_details in
service_definitions.get(service_name, {}).get("resources", {}).items()]
for resource in resources:
setattr(self, 'list_{0}' . format(resource), self.list)
setattr(self, 'get_{0}' . format(resource), self.get)
setattr(self, 'create_{0}' . format(resource), self.create)
setattr(self, 'update_{0}' . format(resource), self.update)
setattr(self, 'delete_{0}' . format(resource), self.delete) | python | {
"resource": ""
} |
q43163 | NetInfo.getSystemIps | train | def getSystemIps():
""" will not return the localhost one """
IPs = []
for interface in NetInfo.getSystemIfs():
if not interface.startswith('lo'):
ip = netinfo.get_ip(interface)
IPs.append(ip)
return IPs | python | {
"resource": ""
} |
q43164 | NetInfo.getIPString | train | def getIPString():
""" return comma delimited string of all the system IPs"""
if not(NetInfo.systemip):
NetInfo.systemip = ",".join(NetInfo.getSystemIps())
return NetInfo.systemip | python | {
"resource": ""
} |
q43165 | check_update | train | def check_update():
"""
Return True if an update is available on pypi
"""
r = requests.get("https://pypi.python.org/pypi/prof/json")
data = r.json()
if versiontuple(data['info']['version']) > versiontuple(__version__):
return True
return False | python | {
"resource": ""
} |
q43166 | to_ut1unix | train | def to_ut1unix(time: Union[str, datetime, float, np.ndarray]) -> np.ndarray:
"""
converts time inputs to UT1 seconds since Unix epoch
"""
# keep this order
time = totime(time)
if isinstance(time, (float, int)):
return time
if isinstance(time, (tuple, list, np.ndarray)):
assert isinstance(time[0], datetime), f'expected datetime, not {type(time[0])}'
return np.array(list(map(dt2ut1, time)))
else:
assert isinstance(time, datetime)
return dt2ut1(time) | python | {
"resource": ""
} |
q43167 | execute | train | def execute(tokens):
""" Perform the actions described by the input tokens. """
if not validate_rc():
print('Your .vacationrc file has errors!')
echo_vacation_rc()
return
for action, value in tokens:
if action == 'show':
show()
elif action == 'log':
log_vacation_days()
elif action == 'echo':
echo_vacation_rc()
elif action == 'take':
take(value)
elif action == 'cancel':
cancel(value)
elif action == 'setrate':
setrate(value)
elif action == 'setdays':
setdays(value) | python | {
"resource": ""
} |
q43168 | unique | train | def unique(transactions):
""" Remove any duplicate entries. """
seen = set()
# TODO: Handle comments
return [x for x in transactions if not (x in seen or seen.add(x))] | python | {
"resource": ""
} |
q43169 | sort | train | def sort(transactions):
""" Return a list of sorted transactions by date. """
return transactions.sort(key=lambda x: datetime.datetime.strptime(x.split(':')[0], '%Y-%m-%d'))[:] | python | {
"resource": ""
} |
q43170 | validate_rc | train | def validate_rc():
""" Before we execute any actions, let's validate our .vacationrc. """
transactions = rc.read()
if not transactions:
print('Your .vacationrc file is empty! Set days and rate.')
return False
transactions = sort(unique(transactions))
return validate_setup(transactions) | python | {
"resource": ""
} |
q43171 | validate_setup | train | def validate_setup(transactions):
""" First two transactions must set rate & days. """
if not transactions:
return True
try:
first, second = transactions[:2]
except ValueError:
print('Error: vacationrc file must have both initial days and rates entries')
return False
parts1, parts2 = first.split(), second.split()
if parts1[0] != parts2[0]:
print('Error: First two entries in vacationrc must have the same date')
return False # Dates must match
if 'rate' not in (parts1[1], parts2[1]) or 'days' not in (parts1[1], parts2[1]):
print('Error: First two entries in vacationrc must set days and rate')
return False
return True | python | {
"resource": ""
} |
q43172 | stat_holidays | train | def stat_holidays(province='BC', year=2015):
""" Returns a list of holiday dates for a province and year. """
return holidays.Canada(state=province, years=year).keys() | python | {
"resource": ""
} |
q43173 | sum_transactions | train | def sum_transactions(transactions):
""" Sums transactions into a total of remaining vacation days. """
workdays_per_year = 250
previous_date = None
rate = 0
day_sum = 0
for transaction in transactions:
date, action, value = _parse_transaction_entry(transaction)
if previous_date is None:
previous_date = date
elapsed = workdays.networkdays(previous_date, date, stat_holidays()) - 1
if action == 'rate':
rate = float(value) / workdays_per_year
elif action == 'off':
elapsed -= 1 # Didn't work that day
day_sum -= 1 # And we used a day
day_sum += rate * elapsed
if action == 'days':
day_sum = value # Fixed value as of this entry
previous_date = date
return day_sum | python | {
"resource": ""
} |
q43174 | get_days_off | train | def get_days_off(transactions):
""" Return the dates for any 'take day off' transactions. """
days_off = []
for trans in transactions:
date, action, _ = _parse_transaction_entry(trans)
if action == 'off':
days_off.append(date)
return days_off | python | {
"resource": ""
} |
q43175 | log_vacation_days | train | def log_vacation_days():
""" Sum and report taken days off. """
days_off = get_days_off(rc.read())
pretty_days = map(lambda day: day.strftime('%a %b %d %Y'), days_off)
for day in pretty_days:
print(day) | python | {
"resource": ""
} |
q43176 | echo_vacation_rc | train | def echo_vacation_rc():
""" Display all our .vacationrc file. """
contents = rc.read()
print('.vacationrc\n===========')
for line in contents:
print(line.rstrip()) | python | {
"resource": ""
} |
q43177 | to_email | train | def to_email(email_class, email, language=None, **data):
"""
Send email to specified email address
"""
if language:
email_class().send([email], language=language, **data)
else:
email_class().send([email], translation.get_language(), **data) | python | {
"resource": ""
} |
q43178 | to_staff | train | def to_staff(email_class, **data):
"""
Email staff users
"""
for user in get_user_model().objects.filter(is_staff=True):
try:
email_class().send([user.email], user.language, **data)
except AttributeError:
email_class().send([user.email], translation.get_language(), **data) | python | {
"resource": ""
} |
q43179 | listify | train | def listify(args):
"""Return args as a list.
If already a list - return as is.
>>> listify([1, 2, 3])
[1, 2, 3]
If a set - return as a list.
>>> listify(set([1, 2, 3]))
[1, 2, 3]
If a tuple - return as a list.
>>> listify(tuple([1, 2, 3]))
[1, 2, 3]
If a generator (also range / xrange) - return as a list.
>>> listify(x + 1 for x in range(3))
[1, 2, 3]
>>> from past.builtins import xrange
>>> from builtins import range
>>> listify(xrange(1, 4))
[1, 2, 3]
>>> listify(range(1, 4))
[1, 2, 3]
If a single instance of something that isn't any of the above - put as a
single element of the returned list.
>>> listify(1)
[1]
If "empty" (None or False or '' or anything else that evaluates to False),
return an empty list ([]).
>>> listify(None)
[]
>>> listify(False)
[]
>>> listify('')
[]
>>> listify(0)
[]
>>> listify([])
[]
"""
if args:
if isinstance(args, list):
return args
elif isinstance(args, (set, tuple, GeneratorType,
range, past.builtins.xrange)):
return list(args)
return [args]
return [] | python | {
"resource": ""
} |
q43180 | create_censor_file | train | def create_censor_file(input_dset,out_prefix=None,fraction=0.1,clip_to=0.1,max_exclude=0.3,motion_file=None,motion_exclude=1.0):
'''create a binary censor file using 3dToutcount
:input_dset: the input dataset
:prefix: output 1D file (default: ``prefix(input_dset)`` + ``.1D``)
:fraction: censor a timepoint if proportional of outliers in this
time point is greater than given value
:clip_to: keep the number of time points censored under this proportion
of total reps. If more time points would be censored,
it will only pick the top ``clip_to*reps`` points
:max_exclude: if more time points than the given proportion of reps are excluded for the
entire run, throw an exception -- something is probably wrong
:motion_file: optional filename of a "motion" file with multiple columns and rows corresponding to reps.
It doesn't really matter what the values are, as long as they are appropriate relative to ``motion_exclude``
:motion_exclude: Will exclude any reps that have a value greater than this in any column of ``motion_file``
'''
(outcount,perc_outliers) = nl.qc.outcount(input_dset,fraction)
info = nl.dset_info(input_dset)
binarize = lambda o,f: [oo<f for oo in o]
perc_outliers = lambda o: 1.-(sum(o)/float(info.reps))
if motion_file:
with open(motion_file,'Ur') as f:
motion = [max([float(y) for y in x.strip().split()]) for x in f.read().split('\n') if len(x.strip())>0 and x.strip()[0]!='#']
motion_1D = [x for x in binarize(motion,motion_exclude)]
if perc_outliers(motion_1D) > max_exclude:
nl.notify('Error: Too many points excluded because of motion (%.2f) in dset %s' % (perc_outliers(motion_1D),input_dset),level=nl.level.error)
return False
outcount = [outcount[i] if motion_1D[i] else 1. for i in range(len(outcount))]
binary_outcount = binarize(outcount,fraction)
if max_exclude and perc_outliers(binary_outcount) > max_exclude:
nl.notify('Error: Found %.1f%% outliers in dset %s' % (100*perc_outliers(outcount),input_dset),level=nl.level.error)
return False
if clip_to:
while perc_outliers(binary_outcount) > clip_to:
best_outlier = min([(outcount[i],i) for i in range(len(outcount)) if not binary_outcount[i]])
binary_outcount[best_outlier[1]] = True
if not out_prefix:
out_prefix = nl.prefix(input_dset) + '.1D'
with open(out_prefix,'w') as f:
f.write('\n'.join([str(int(x)) for x in binary_outcount]))
return True | python | {
"resource": ""
} |
q43181 | calc | train | def calc(dsets,expr,prefix=None,datum=None):
''' returns a string of an inline ``3dcalc``-style expression
``dsets`` can be a single string, or list of strings. Each string in ``dsets`` will
be labeled 'a','b','c', sequentially. The expression ``expr`` is used directly
If ``prefix`` is not given, will return a 3dcalc string that can be passed to another
AFNI program as a dataset. Otherwise, will create the dataset with the name ``prefix``'''
return available_method('calc')(dsets,expr,prefix,datum) | python | {
"resource": ""
} |
q43182 | cluster | train | def cluster(dset,min_distance,min_cluster_size,prefix=None):
'''clusters given ``dset`` connecting voxels ``min_distance``mm away with minimum cluster size of ``min_cluster_size``
default prefix is ``dset`` suffixed with ``_clust%d``'''
if prefix==None:
prefix = nl.suffix(dset,'_clust%d' % min_cluster_size)
return available_method('cluster')(dset,min_distance,min_cluster_size,prefix) | python | {
"resource": ""
} |
q43183 | blur | train | def blur(dset,fwhm,prefix=None):
'''blurs ``dset`` with given ``fwhm`` runs 3dmerge to blur dataset to given ``fwhm``
default ``prefix`` is to suffix ``dset`` with ``_blur%.1fmm``'''
if prefix==None:
prefix = nl.suffix(dset,'_blur%.1fmm'%fwhm)
return available_method('blur')(dset,fwhm,prefix) | python | {
"resource": ""
} |
q43184 | skull_strip | train | def skull_strip(dset,suffix='_ns',prefix=None,unifize=True):
'''attempts to cleanly remove skull from ``dset``'''
return available_method('skull_strip')(dset,suffix,prefix,unifize) | python | {
"resource": ""
} |
q43185 | collect_manifest_dependencies | train | def collect_manifest_dependencies(manifest_data, lockfile_data):
"""Convert the manifest format to the dependencies schema"""
output = {}
for dependencyName, dependencyConstraint in manifest_data.items():
output[dependencyName] = {
# identifies where this dependency is installed from
'source': 'example-package-manager',
# the constraint that the user is using (i.e. "> 1.0.0")
'constraint': dependencyConstraint,
# all available versions above and outside of their constraint
# - usually you would need to use the package manager lib or API
# to get this information (we just fake it here)
'available': [
{'name': '2.0.0'},
],
}
return output | python | {
"resource": ""
} |
q43186 | collect_lockfile_dependencies | train | def collect_lockfile_dependencies(lockfile_data):
"""Convert the lockfile format to the dependencies schema"""
output = {}
for dependencyName, installedVersion in lockfile_data.items():
output[dependencyName] = {
'source': 'example-package-manager',
'installed': {'name': installedVersion},
}
return output | python | {
"resource": ""
} |
q43187 | match_similar | train | def match_similar(base, items):
"""Get the most similar matching item from a list of items.
@param base: base item to locate best match
@param items: list of items for comparison
@return: most similar matching item or None
"""
finds = list(find_similar(base, items))
if finds:
return max(finds, key=base.similarity) # TODO: make O(n)
return None | python | {
"resource": ""
} |
q43188 | duplicates | train | def duplicates(base, items):
"""Get an iterator of items similar but not equal to the base.
@param base: base item to perform comparison against
@param items: list of items to compare to the base
@return: generator of items sorted by similarity to the base
"""
for item in items:
if item.similarity(base) and not item.equality(base):
yield item | python | {
"resource": ""
} |
q43189 | sort | train | def sort(base, items):
"""Get a sorted list of items ranked in descending similarity.
@param base: base item to perform comparison against
@param items: list of items to compare to the base
@return: list of items sorted by similarity to the base
"""
return sorted(items, key=base.similarity, reverse=True) | python | {
"resource": ""
} |
q43190 | terms_from_dict | train | def terms_from_dict(source):
""" Convert a dict representing a query to a string.
Args:
source -- A dict with query xpaths as keys and text or nested query dicts as values.
Returns:
A string composed from the nested query terms given.
>>> terms_from_dict({'document': {'title': "Title this is", 'text': "A long text."}})
'<document><text>A long text.</text><title>Title this is</title></document>'
>>> terms_from_dict({'document/title': "Title this is", 'document/text': "A long text."})
'<document><title>Title this is</title></document><document><text>A long text.</text></document>'
"""
parsed = ''
for xpath, text in source.items():
if hasattr(text, 'keys'):
parsed += term(terms_from_dict(text), xpath, escape=False)
else:
parsed += term(text, xpath)
return parsed | python | {
"resource": ""
} |
q43191 | json_schema_validation_format | train | def json_schema_validation_format(value, schema_validation_type):
"""
adds iso8601 to the datetimevalidator
raises SchemaError if validation fails
"""
DEFAULT_FORMAT_VALIDATORS['date-time'] = validate_format_iso8601
DEFAULT_FORMAT_VALIDATORS['text'] = validate_format_text
validictory.validate(value, schema_validation_type, format_validators=DEFAULT_FORMAT_VALIDATORS) | python | {
"resource": ""
} |
q43192 | Tasks.register | train | def register(self, func):
"""
Register a task. Typically used as a decorator to the task function.
If a task by that name already exists,
a TaskAlreadyRegistered exception is raised.
:param func: func to register as an ape task
:return: invalid accessor
"""
if hasattr(self._tasks, func.__name__):
raise TaskAlreadyRegistered(func.__name__)
setattr(self._tasks, func.__name__, func)
return _get_invalid_accessor(func.__name__) | python | {
"resource": ""
} |
q43193 | purge_old_logs | train | def purge_old_logs(delete_before_days=7):
"""
Purges old logs from the database table
"""
delete_before_date = timezone.now() - timedelta(days=delete_before_days)
logs_deleted = Log.objects.filter(
created_on__lte=delete_before_date).delete()
return logs_deleted | python | {
"resource": ""
} |
q43194 | purge_old_event_logs | train | def purge_old_event_logs(delete_before_days=7):
"""
Purges old event logs from the database table
"""
delete_before_date = timezone.now() - timedelta(days=delete_before_days)
logs_deleted = EventLog.objects.filter(
created_on__lte=delete_before_date).delete()
return logs_deleted | python | {
"resource": ""
} |
q43195 | purge_old_request_logs | train | def purge_old_request_logs(delete_before_days=7):
"""
Purges old request logs from the database table
"""
delete_before_date = timezone.now() - timedelta(days=delete_before_days)
logs_deleted = RequestLog.objects.filter(
created_on__lte=delete_before_date).delete()
return logs_deleted | python | {
"resource": ""
} |
q43196 | sigmafilter | train | def sigmafilter(data, sigmas, passes):
"""Remove datapoints outside of a specified standard deviation range."""
for n in range(passes):
meandata = np.mean(data[~np.isnan(data)])
sigma = np.std(data[~np.isnan(data)])
data[data > meandata+sigmas*sigma] = np.nan
data[data < meandata-sigmas*sigma] = np.nan
return data | python | {
"resource": ""
} |
q43197 | runningstd | train | def runningstd(t, data, width):
"""Compute the running standard deviation of a time series.
Returns `t_new`, `std_r`.
"""
ne = len(t) - width
t_new = np.zeros(ne)
std_r = np.zeros(ne)
for i in range(ne):
t_new[i] = np.mean(t[i:i+width+1])
std_r[i] = scipy.stats.nanstd(data[i:i+width+1])
return t_new, std_r | python | {
"resource": ""
} |
q43198 | smooth | train | def smooth(data, fw):
"""Smooth data with a moving average."""
if fw == 0:
fdata = data
else:
fdata = lfilter(np.ones(fw)/fw, 1, data)
return fdata | python | {
"resource": ""
} |
q43199 | calcstats | train | def calcstats(data, t1, t2, sr):
"""Calculate the mean and standard deviation of some array between
t1 and t2 provided the sample rate sr.
"""
dataseg = data[sr*t1:sr*t2]
meandata = np.mean(dataseg[~np.isnan(dataseg)])
stddata = np.std(dataseg[~np.isnan(dataseg)])
return meandata, stddata | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.