sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def from_dict(page, content_type=None):
    """
    Build a |Page| object from a response-body dictionary. Intended for
    internal use when deserializing the json returned by a paginated endpoint.
    :param page: The dictionary to deserialize.
    :param content_type: Optional class that each item should be deserialized into;
        must be a subclass of ModelBase.
    :return: The constructed |Page| object.
    """
    result = Page(items=page.get('items'),
                  page_number=page.get('pageNumber'),
                  page_size=page.get('pageSize'),
                  total_elements=page.get('totalElements'),
                  has_next=page.get('hasNext'))
    if content_type is None:
        return result
    if not issubclass(content_type, ModelBase):
        raise ValueError("'content_type' must be a subclass of ModelBase.")
    # deserialize each raw item into the requested model class
    result.items = list(map(content_type.from_dict, result.items))
    return result
|Page| object from the body of a response json from a paginated endpoint.
:param page: The dictionary.
:param content_type: The class that the contents should be deserialized into.
:return: The resulting |Page| object. | entailment |
def to_dict(self, remove_nones=False):
    """
    Build a dictionary representation of this page.
    :param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``.
    :return: A dictionary representation of the page.
    """
    # serialize each item via its own to_dict when it has one; otherwise keep it as-is
    serialized = [item.to_dict(remove_nones=remove_nones) if hasattr(item, 'to_dict') else item
                  for item in self.items]
    return {
        'items': serialized,
        'pageNumber': self.page_number,
        'pageSize': self.page_size,
        'totalElements': self.total_elements,
        'hasNext': self.has_next,
    }
:param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``.
:return: A dictionary representation of the page. | entailment |
def get_page_generator(func, start_page=0, page_size=None):
    """
    Build a generator that walks the pages of a paginated endpoint. This method
    is intended for internal use.
    :param func: Callable taking ``page_number`` and ``page_size`` keyword arguments
        and returning the corresponding |Page| object.
    :param start_page: The page number to begin at.
    :param page_size: The number of items per page.
    :return: A generator yielding each successive |Page|.
    """
    current = start_page
    while True:
        # fetch and yield the current page
        page = func(page_number=current, page_size=page_size)
        yield page
        # stop once the endpoint reports that no further pages exist
        if not page.has_more_pages():
            break
        current += 1
use.
:param func: Should take parameters ``page_number`` and ``page_size`` and return the corresponding |Page| object.
:param start_page: The page to start on.
:param page_size: The size of each page.
:return: A generator that generates each successive page. | entailment |
def to_dict(self, remove_nones=False):
    """
    Build a dictionary representation of the object.
    :param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``.
    :return: The dictionary representation.
    """
    if not remove_nones:
        # subclasses are responsible for producing the full representation
        raise NotImplementedError()
    # delegate to the subclass implementation, then drop the None-valued entries
    full = self.to_dict()
    return {key: value for key, value in full.items() if value is not None}
:param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``.
:return: The dictionary representation. | entailment |
def to_dict(self, remove_nones=False):
    """
    Creates a dictionary representation of the object.
    :param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``.
    :return: A dictionary representation of the report.
    """
    if remove_nones:
        # the base class filters out None values for us
        report_dict = super().to_dict(remove_nones=True)
    else:
        report_dict = {
            'title': self.title,
            'reportBody': self.body,
            'timeBegan': self.time_began,
            'externalUrl': self.external_url,
            'distributionType': self._get_distribution_type(),
            'externalTrackingId': self.external_id,
            'enclaveIds': self.enclave_ids,
            'created': self.created,
            'updated': self.updated,
        }
    # id field might not be present on the object; when remove_nones is True,
    # do NOT reintroduce a None id that the base implementation just filtered out
    if self.id is not None:
        report_dict['id'] = self.id
    elif not remove_nones:
        report_dict['id'] = None
    return report_dict
:param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``.
:return: A dictionary representation of the report. | entailment |
def from_dict(cls, report):
    """
    Create a report object from a dictionary. This method is intended for internal use, to construct a
    :class:`Report` object from the body of a response json. It expects the keys of the dictionary to match those
    of the json that would be found in a response to an API call such as ``GET /report/{id}``.
    :param report: The dictionary.
    :return: The report object.
    """
    # determine distribution type; leave undetermined when the field is absent
    distribution_type = report.get('distributionType')
    if distribution_type is not None:
        is_enclave = distribution_type.upper() != DistributionType.COMMUNITY
    else:
        is_enclave = None
    # construct via cls rather than hard-coding Report, so that subclasses
    # deserialize into themselves instead of the base class
    return cls(id=report.get('id'),
               title=report.get('title'),
               body=report.get('reportBody'),
               time_began=report.get('timeBegan'),
               external_id=report.get('externalTrackingId'),
               external_url=report.get('externalUrl'),
               is_enclave=is_enclave,
               enclave_ids=report.get('enclaveIds'),
               created=report.get('created'),
               updated=report.get('updated'))
:class:`Report` object from the body of a response json. It expects the keys of the dictionary to match those
of the json that would be found in a response to an API call such as ``GET /report/{id}``.
:param report: The dictionary.
:return: The report object. | entailment |
def serialize_to_dict(dictionary):
    """Return a json-serializable copy of ``dictionary``, converting
    non-serializable value types such as numpy arrays along the way."""
    result = {}
    for key, value in dictionary.items():
        if isinstance(value, dict):
            # recurse into nested dictionaries
            result[key] = serialize_to_dict(value)
        elif isinstance(value, np.ndarray):
            # custom serialization: float64 is unsupported on the js side,
            # so downcast before encoding
            if value.dtype == 'float64':
                value = value.astype('float32')
            result[key] = encode_numpy(value)
        else:
            result[key] = value
    return result
non-serializable data types such as numpy arrays. | entailment |
def make_graph(pkg):
    """Returns a dictionary of information about pkg & its recursive deps.
    Given a string, which can be parsed as a requirement specifier, return a
    dictionary where each key is the name of pkg or one of its recursive
    dependencies, and each value is a dictionary returned by research_package.
    (No, it's not really a graph.)
    """
    ignore = {'argparse', 'pip', 'setuptools', 'wsgiref'}
    requirement = pkg_resources.Requirement.parse(pkg)
    dependencies = {name: {} for name in recursive_dependencies(requirement)
                    if name not in ignore}
    # map installed package keys to their versions
    versions = {dist.key: dist.version for dist in pkg_resources.working_set}
    for name, info in dependencies.items():
        try:
            info['version'] = versions[name]
        except KeyError:
            warnings.warn("{} is not installed so we cannot compute "
                          "resources for its dependencies.".format(name),
                          PackageNotInstalledWarning)
            info['version'] = None
    # research each package only after every version has been resolved
    for name, info in dependencies.items():
        info.update(research_package(name, info['version']))
    return OrderedDict(sorted(dependencies.items()))
Given a string, which can be parsed as a requirement specifier, return a
dictionary where each key is the name of pkg or one of its recursive
dependencies, and each value is a dictionary returned by research_package.
(No, it's not really a graph.) | entailment |
def get_report_details(self, report_id, id_type=None):
    """
    Retrieves a report by its ID. Internal and external IDs are both allowed.
    :param str report_id: The ID of the incident report.
    :param str id_type: Indicates whether ID is internal or external.
    :return: The retrieved |Report| object.
    Example:
    >>> report = ts.get_report_details("1a09f14b-ef8c-443f-b082-9643071c522a")
    >>> print(report)
    {
      "id": "1a09f14b-ef8c-443f-b082-9643071c522a",
      "created": 1515571633505,
      "updated": 1515620420062,
      "reportBody": "Employee reported suspect email. We had multiple reports of suspicious email overnight ...",
      "title": "Phishing Incident",
      "enclaveIds": [
          "ac6a0d17-7350-4410-bc57-9699521db992"
      ],
      "distributionType": "ENCLAVE",
      "timeBegan": 1479941278000
    }
    """
    response = self._client.get("reports/%s" % report_id, params={'idType': id_type})
    return Report.from_dict(response.json())
:param str report_id: The ID of the incident report.
:param str id_type: Indicates whether ID is internal or external.
:return: The retrieved |Report| object.
Example:
>>> report = ts.get_report_details("1a09f14b-ef8c-443f-b082-9643071c522a")
>>> print(report)
{
"id": "1a09f14b-ef8c-443f-b082-9643071c522a",
"created": 1515571633505,
"updated": 1515620420062,
"reportBody": "Employee reported suspect email. We had multiple reports of suspicious email overnight ...",
"title": "Phishing Incident",
"enclaveIds": [
"ac6a0d17-7350-4410-bc57-9699521db992"
],
"distributionType": "ENCLAVE",
"timeBegan": 1479941278000
} | entailment |
def get_reports_page(self, is_enclave=None, enclave_ids=None, tag=None, excluded_tags=None,
                     from_time=None, to_time=None):
    """
    Retrieves a page of reports, filtering by time window, distribution type, enclave association, and tag.
    The results are sorted by updated time.
    This method does not take ``page_number`` and ``page_size`` parameters. Instead, each successive page must be
    found by adjusting the ``from_time`` and ``to_time`` parameters.
    Note: This endpoint will only return reports from a time window of maximum size of 2 weeks. If you give a
    time window larger than 2 weeks, it will pull reports starting at 2 weeks before the "to" date, through the
    "to" date.
    :param boolean is_enclave: restrict reports to specific distribution type (optional - by default all accessible
        reports are returned).
    :param list(str) enclave_ids: list of enclave ids used to restrict reports to specific enclaves (optional - by
        default reports from all of user's enclaves are returned)
    :param list(str) tag: Name (or list of names) of tag(s) to filter reports by. Only reports containing
        ALL of these tags will be returned.
    :param list(str) excluded_tags: Reports containing ANY of these tags will be excluded from the results.
    :param int from_time: start of time window in milliseconds since epoch (optional)
    :param int to_time: end of time window in milliseconds since epoch (optional)
    :return: A |Page| of |Report| objects.
    """
    # explicitly compare to True and False to distinguish from None; a plain
    # truthiness test would send COMMUNITY whenever is_enclave was None,
    # contradicting the documented default of returning all accessible reports
    if is_enclave is True:
        distribution_type = DistributionType.ENCLAVE
    elif is_enclave is False:
        distribution_type = DistributionType.COMMUNITY
    else:
        distribution_type = None
    if enclave_ids is None:
        enclave_ids = self.enclave_ids
    params = {
        'from': from_time,
        'to': to_time,
        'distributionType': distribution_type,
        'enclaveIds': enclave_ids,
        'tags': tag,
        'excludedTags': excluded_tags
    }
    resp = self._client.get("reports", params=params)
    # create a Page object from the dict
    return Page.from_dict(resp.json(), content_type=Report)
The results are sorted by updated time.
This method does not take ``page_number`` and ``page_size`` parameters. Instead, each successive page must be
found by adjusting the ``from_time`` and ``to_time`` parameters.
Note: This endpoint will only return reports from a time window of maximum size of 2 weeks. If you give a
time window larger than 2 weeks, it will pull reports starting at 2 weeks before the "to" date, through the
"to" date.
:param boolean is_enclave: restrict reports to specific distribution type (optional - by default all accessible
reports are returned).
:param list(str) enclave_ids: list of enclave ids used to restrict reports to specific enclaves (optional - by
default reports from all of user's enclaves are returned)
:param list(str) tag: Name (or list of names) of tag(s) to filter reports by. Only reports containing
ALL of these tags will be returned.
:param list(str) excluded_tags: Reports containing ANY of these tags will be excluded from the results.
:param int from_time: start of time window in milliseconds since epoch (optional)
:param int to_time: end of time window in milliseconds since epoch (optional)
:return: A |Page| of |Report| objects. | entailment |
def submit_report(self, report):
    """
    Submits a report.
    * If ``report.is_enclave`` is ``True``, then the report will be submitted to the enclaves
      identified by ``report.enclaves``; if that field is ``None``, then the enclave IDs registered with this
      |TruStar| object will be used.
    * If ``report.time_began`` is ``None``, then the current time will be used.
    :param report: The |Report| object that was submitted, with the ``id`` field updated based
        on values from the response.
    Example:
    >>> report = Report(title="Suspicious Activity",
    >>>                 body="We have been receiving suspicious requests from 169.178.68.63.",
    >>>                 enclave_ids=["602d4795-31cd-44f9-a85d-f33cb869145a"])
    >>> report = ts.submit_report(report)
    >>> print(report.id)
    ac6a0d17-7350-4410-bc57-9699521db992
    >>> print(report.title)
    Suspicious Activity
    """
    # distribution type defaults to "enclave"
    if report.is_enclave is None:
        report.is_enclave = True
    if report.enclave_ids is None:
        # ENCLAVE submissions fall back to the configured enclaves; COMMUNITY
        # submissions still require a non-null (possibly empty) list
        report.enclave_ids = self.enclave_ids if report.is_enclave else []
    if report.is_enclave and len(report.enclave_ids) == 0:
        raise Exception("Cannot submit a report of distribution type 'ENCLAVE' with an empty set of enclaves.")
    # fall back to the current time when no beginning time was given
    if report.time_began is None:
        report.set_time_began(datetime.now())
    resp = self._client.post("reports", data=json.dumps(report.to_dict()), timeout=60)
    # the response body is the new report's id; decode when it arrives as bytes
    report_id = resp.content
    if isinstance(report_id, bytes):
        report_id = report_id.decode('utf-8')
    report.id = report_id
    return report
* If ``report.is_enclave`` is ``True``, then the report will be submitted to the enclaves
identified by ``report.enclaves``; if that field is ``None``, then the enclave IDs registered with this
|TruStar| object will be used.
* If ``report.time_began`` is ``None``, then the current time will be used.
:param report: The |Report| object that was submitted, with the ``id`` field updated based
on values from the response.
Example:
>>> report = Report(title="Suspicious Activity",
>>> body="We have been receiving suspicious requests from 169.178.68.63.",
>>> enclave_ids=["602d4795-31cd-44f9-a85d-f33cb869145a"])
>>> report = ts.submit_report(report)
>>> print(report.id)
ac6a0d17-7350-4410-bc57-9699521db992
>>> print(report.title)
Suspicious Activity | entailment |
def update_report(self, report):
    """
    Updates the report identified by the ``report.id`` field; if this field does not exist, then
    ``report.external_id`` will be used if it exists. Any other fields on ``report`` that are not ``None``
    will overwrite values on the report in TruSTAR's system. Any fields that are ``None`` will simply be ignored;
    their values will be unchanged.
    :param report: A |Report| object with the updated values.
    :return: The |Report| object.
    Example:
    >>> report = ts.get_report_details(report_id)
    >>> print(report.title)
    Old Title
    >>> report.title = "Changed title"
    >>> updated_report = ts.update_report(report)
    >>> print(updated_report.title)
    Changed Title
    """
    # default to internal ID type if ID field is present
    if report.id is not None:
        id_type = IdType.INTERNAL
        report_id = report.id
    # if no ID field is present, but external ID field is, default to external ID type
    elif report.external_id is not None:
        id_type = IdType.EXTERNAL
        report_id = report.external_id
    # if no ID fields exist, raise exception
    else:
        raise Exception("Cannot update report without either an ID or an external ID.")
    # not allowed to update value of 'reportId', so remove it before serializing
    report_dict = {k: v for k, v in report.to_dict().items() if k != 'reportId'}
    params = {'idType': id_type}
    # serialize the filtered dict; previously the unfiltered report.to_dict() was
    # sent, which left report_dict unused and included the forbidden 'reportId'
    data = json.dumps(report_dict)
    self._client.put("reports/%s" % report_id, data=data, params=params)
    return report
``report.external_id`` will be used if it exists. Any other fields on ``report`` that are not ``None``
will overwrite values on the report in TruSTAR's system. Any fields that are ``None`` will simply be ignored;
their values will be unchanged.
:param report: A |Report| object with the updated values.
:return: The |Report| object.
Example:
>>> report = ts.get_report_details(report_id)
>>> print(report.title)
Old Title
>>> report.title = "Changed title"
>>> updated_report = ts.update_report(report)
>>> print(updated_report.title)
Changed Title | entailment |
def delete_report(self, report_id, id_type=None):
    """
    Deletes the report with the given ID.
    :param report_id: the ID of the report to delete
    :param id_type: indicates whether the ID is internal or an external ID provided by the user
    :return: the response object
    Example:
    >>> response = ts.delete_report("4d1fcaee-5009-4620-b239-2b22c3992b80")
    """
    params = {'idType': id_type}
    # return the response so the documented contract (and the example above)
    # holds; previously the return value was silently dropped
    return self._client.delete("reports/%s" % report_id, params=params)
:param report_id: the ID of the report to delete
:param id_type: indicates whether the ID is internal or an external ID provided by the user
:return: the response object
Example:
>>> response = ts.delete_report("4d1fcaee-5009-4620-b239-2b22c3992b80") | entailment |
def get_correlated_report_ids(self, indicators):
    """
    DEPRECATED!
    Retrieves a list of the IDs of all TruSTAR reports that contain the searched indicators.
    :param indicators: A list of indicator values to retrieve correlated reports for.
    :return: The list of IDs of reports that correlated.
    Example:
    >>> report_ids = ts.get_correlated_report_ids(["wannacry", "www.evil.com"])
    >>> print(report_ids)
    ["e3bc6921-e2c8-42eb-829e-eea8da2d3f36", "4d04804f-ff82-4a0b-8586-c42aef2f6f73"]
    """
    response = self._client.get("reports/correlate", params={'indicators': indicators})
    return response.json()
Retrieves a list of the IDs of all TruSTAR reports that contain the searched indicators.
:param indicators: A list of indicator values to retrieve correlated reports for.
:return: The list of IDs of reports that correlated.
Example:
>>> report_ids = ts.get_correlated_report_ids(["wannacry", "www.evil.com"])
>>> print(report_ids)
["e3bc6921-e2c8-42eb-829e-eea8da2d3f36", "4d04804f-ff82-4a0b-8586-c42aef2f6f73"] | entailment |
def get_correlated_reports_page(self, indicators, enclave_ids=None, is_enclave=True,
                                page_size=None, page_number=None):
    """
    Retrieves a page of all TruSTAR reports that contain the searched indicators.
    :param indicators: A list of indicator values to retrieve correlated reports for.
    :param enclave_ids: The enclaves to search in.
    :param is_enclave: Whether to search enclave reports or community reports.
    :param int page_number: the page number to get.
    :param int page_size: the size of the page to be returned.
    :return: The list of IDs of reports that correlated.
    Example:
    >>> reports = ts.get_correlated_reports_page(["wannacry", "www.evil.com"]).items
    >>> print([report.id for report in reports])
    ["e3bc6921-e2c8-42eb-829e-eea8da2d3f36", "4d04804f-ff82-4a0b-8586-c42aef2f6f73"]
    """
    # pick the distribution type matching the requested report kind
    distribution_type = DistributionType.ENCLAVE if is_enclave else DistributionType.COMMUNITY
    response = self._client.get("reports/correlated", params={
        'indicators': indicators,
        'enclaveIds': enclave_ids,
        'distributionType': distribution_type,
        'pageNumber': page_number,
        'pageSize': page_size
    })
    return Page.from_dict(response.json(), content_type=Report)
:param indicators: A list of indicator values to retrieve correlated reports for.
:param enclave_ids: The enclaves to search in.
:param is_enclave: Whether to search enclave reports or community reports.
:param int page_number: the page number to get.
:param int page_size: the size of the page to be returned.
:return: The list of IDs of reports that correlated.
Example:
>>> reports = ts.get_correlated_reports_page(["wannacry", "www.evil.com"]).items
>>> print([report.id for report in reports])
["e3bc6921-e2c8-42eb-829e-eea8da2d3f36", "4d04804f-ff82-4a0b-8586-c42aef2f6f73"] | entailment |
def search_reports_page(self, search_term=None,
                        enclave_ids=None,
                        from_time=None,
                        to_time=None,
                        tags=None,
                        excluded_tags=None,
                        page_size=None,
                        page_number=None):
    """
    Search for reports containing a search term.
    :param str search_term: The term to search for. If empty, no search term will be applied. Otherwise, must
        be at least 3 characters.
    :param list(str) enclave_ids: list of enclave ids used to restrict reports to specific enclaves (optional - by
        default reports from all of user's enclaves are returned)
    :param int from_time: start of time window in milliseconds since epoch (optional)
    :param int to_time: end of time window in milliseconds since epoch (optional)
    :param list(str) tags: Name (or list of names) of tag(s) to filter reports by. Only reports containing
        ALL of these tags will be returned. (optional)
    :param list(str) excluded_tags: Reports containing ANY of these tags will be excluded from the results.
    :param int page_number: the page number to get. (optional)
    :param int page_size: the size of the page to be returned.
    :return: a |Page| of |Report| objects. *NOTE*: The bodies of these reports will be ``None``.
    """
    # the search term travels in the request body; everything else is a query param
    payload = json.dumps({'searchTerm': search_term})
    query = {
        'enclaveIds': enclave_ids,
        'from': from_time,
        'to': to_time,
        'tags': tags,
        'excludedTags': excluded_tags,
        'pageSize': page_size,
        'pageNumber': page_number
    }
    response = self._client.post("reports/search", params=query, data=payload)
    return Page.from_dict(response.json(), content_type=Report)
:param str search_term: The term to search for. If empty, no search term will be applied. Otherwise, must
be at least 3 characters.
:param list(str) enclave_ids: list of enclave ids used to restrict reports to specific enclaves (optional - by
default reports from all of user's enclaves are returned)
:param int from_time: start of time window in milliseconds since epoch (optional)
:param int to_time: end of time window in milliseconds since epoch (optional)
:param list(str) tags: Name (or list of names) of tag(s) to filter reports by. Only reports containing
ALL of these tags will be returned. (optional)
:param list(str) excluded_tags: Reports containing ANY of these tags will be excluded from the results.
:param int page_number: the page number to get. (optional)
:param int page_size: the size of the page to be returned.
:return: a |Page| of |Report| objects. *NOTE*: The bodies of these reports will be ``None``. | entailment |
def _get_reports_page_generator(self, is_enclave=None, enclave_ids=None, tag=None, excluded_tags=None,
                                from_time=None, to_time=None):
    """
    Creates a generator from the |get_reports_page| method that returns each successive page.
    :param boolean is_enclave: restrict reports to specific distribution type (optional - by default all accessible
        reports are returned).
    :param list(str) enclave_ids: list of enclave ids used to restrict reports to specific
        enclaves (optional - by default reports from all enclaves are returned)
    :param str tag: name of tag to filter reports by. if a tag with this name exists in more than one enclave
        indicated in ``enclave_ids``, the request will fail. handle this by making separate requests for each
        enclave ID if necessary.
    :param int from_time: start of time window in milliseconds since epoch
    :param int to_time: end of time window in milliseconds since epoch (optional, defaults to current time)
    :return: The generator.
    """
    get_page = functools.partial(self.get_reports_page, is_enclave, enclave_ids, tag, excluded_tags)

    def next_to_time(page):
        # the next window ends at the 'updated' time of the last report on this page
        return page.items[-1].updated if page.items else None

    return get_time_based_page_generator(
        get_page=get_page,
        get_next_to_time=next_to_time,
        from_time=from_time,
        to_time=to_time
    )
:param boolean is_enclave: restrict reports to specific distribution type (optional - by default all accessible
reports are returned).
:param list(str) enclave_ids: list of enclave ids used to restrict reports to specific
enclaves (optional - by default reports from all enclaves are returned)
:param str tag: name of tag to filter reports by. if a tag with this name exists in more than one enclave
indicated in ``enclave_ids``, the request will fail. handle this by making separate requests for each
enclave ID if necessary.
:param int from_time: start of time window in milliseconds since epoch
:param int to_time: end of time window in milliseconds since epoch (optional, defaults to current time)
:return: The generator. | entailment |
def get_reports(self, is_enclave=None, enclave_ids=None, tag=None, excluded_tags=None, from_time=None, to_time=None):
    """
    Uses the |get_reports_page| method to create a generator that returns each successive report as a trustar
    report object.
    :param boolean is_enclave: restrict reports to specific distribution type (optional - by default all accessible
        reports are returned).
    :param list(str) enclave_ids: list of enclave ids used to restrict reports to specific
        enclaves (optional - by default reports from all enclaves are returned)
    :param list(str) tag: a list of tags; only reports containing ALL of these tags will be returned.
        If a tag with this name exists in more than one enclave in the list passed as the ``enclave_ids``
        argument, the request will fail. Handle this by making separate requests for each
        enclave ID if necessary.
    :param list(str) excluded_tags: a list of tags; reports containing ANY of these tags will not be returned.
    :param int from_time: start of time window in milliseconds since epoch (optional)
    :param int to_time: end of time window in milliseconds since epoch (optional)
    :return: A generator of Report objects.
    Note: If a report contains all of the tags in the list passed as argument to the 'tag' parameter and also
    contains any (1 or more) of the tags in the list passed as argument to the 'excluded_tags' parameter, that
    report will not be returned by this function.
    Example:
    >>> page = ts.get_reports(is_enclave=True, tag="malicious", from_time=1425695711000, to_time=1514185311000)
    >>> for report in reports: print(report.id)
    '661583cb-a6a7-4cbd-8a90-01578fa4da89'
    'da131660-2708-4c8a-926e-f91fb5dbbc62'
    '2e3400d6-fa37-4a8c-bc2f-155aaa02ae5a'
    '38064828-d3db-4fff-8ab8-e0e3b304ff44'
    'dbf26104-cee5-4ca4-bdbf-a01d0178c007'
    """
    # flatten the page generator into a generator of individual reports
    pages = self._get_reports_page_generator(is_enclave, enclave_ids, tag,
                                             excluded_tags, from_time, to_time)
    return Page.get_generator(page_generator=pages)
report object.
:param boolean is_enclave: restrict reports to specific distribution type (optional - by default all accessible
reports are returned).
:param list(str) enclave_ids: list of enclave ids used to restrict reports to specific
enclaves (optional - by default reports from all enclaves are returned)
:param list(str) tag: a list of tags; only reports containing ALL of these tags will be returned.
If a tag with this name exists in more than one enclave in the list passed as the ``enclave_ids``
argument, the request will fail. Handle this by making separate requests for each
enclave ID if necessary.
:param list(str) excluded_tags: a list of tags; reports containing ANY of these tags will not be returned.
:param int from_time: start of time window in milliseconds since epoch (optional)
:param int to_time: end of time window in milliseconds since epoch (optional)
:return: A generator of Report objects.
Note: If a report contains all of the tags in the list passed as argument to the 'tag' parameter and also
contains any (1 or more) of the tags in the list passed as argument to the 'excluded_tags' parameter, that
report will not be returned by this function.
Example:
>>> page = ts.get_reports(is_enclave=True, tag="malicious", from_time=1425695711000, to_time=1514185311000)
>>> for report in reports: print(report.id)
'661583cb-a6a7-4cbd-8a90-01578fa4da89'
'da131660-2708-4c8a-926e-f91fb5dbbc62'
'2e3400d6-fa37-4a8c-bc2f-155aaa02ae5a'
'38064828-d3db-4fff-8ab8-e0e3b304ff44'
'dbf26104-cee5-4ca4-bdbf-a01d0178c007' | entailment |
def _get_correlated_reports_page_generator(self, indicators, enclave_ids=None, is_enclave=True,
                                           start_page=0, page_size=None):
    """
    Creates a generator from the |get_correlated_reports_page| method that returns each
    successive page.
    :param indicators: A list of indicator values to retrieve correlated reports for.
    :param enclave_ids: The enclaves to search in.
    :param is_enclave: Whether to search enclave reports or community reports.
    :return: The generator.
    """
    # bind the fixed search arguments, leaving page_number/page_size to the paginator
    fetch = functools.partial(self.get_correlated_reports_page, indicators, enclave_ids, is_enclave)
    return Page.get_page_generator(fetch, start_page, page_size)
successive page.
:param indicators: A list of indicator values to retrieve correlated reports for.
:param enclave_ids:
:param is_enclave:
:return: The generator. | entailment |
def get_correlated_reports(self, indicators, enclave_ids=None, is_enclave=True):
    """
    Uses the |get_correlated_reports_page| method to create a generator that returns each successive report.
    :param indicators: A list of indicator values to retrieve correlated reports for.
    :param enclave_ids: The enclaves to search in.
    :param is_enclave: Whether to search enclave reports or community reports.
    :return: The generator.
    """
    # flatten the page generator into a generator of individual reports
    pages = self._get_correlated_reports_page_generator(indicators, enclave_ids, is_enclave)
    return Page.get_generator(page_generator=pages)
:param indicators: A list of indicator values to retrieve correlated reports for.
:param enclave_ids: The enclaves to search in.
:param is_enclave: Whether to search enclave reports or community reports.
:return: The generator. | entailment |
def _search_reports_page_generator(self, search_term=None,
enclave_ids=None,
from_time=None,
to_time=None,
tags=None,
excluded_tags=None,
start_page=0,
page_size=None):
"""
Creates a generator from the |search_reports_page| method that returns each successive page.
:param str search_term: The term to search for. If empty, no search term will be applied. Otherwise, must
be at least 3 characters.
:param list(str) enclave_ids: list of enclave ids used to restrict reports to specific enclaves (optional - by
default reports from all of user's enclaves are returned)
:param int from_time: start of time window in milliseconds since epoch (optional)
:param int to_time: end of time window in milliseconds since epoch (optional)
:param list(str) tags: Name (or list of names) of tag(s) to filter reports by. Only reports containing
ALL of these tags will be returned. (optional)
:param list(str) excluded_tags: Reports containing ANY of these tags will be excluded from the results.
:param int start_page: The page to start on.
:param page_size: The size of each page.
:return: The generator.
"""
get_page = functools.partial(self.search_reports_page, search_term, enclave_ids, from_time, to_time, tags,
excluded_tags)
return Page.get_page_generator(get_page, start_page, page_size) | Creates a generator from the |search_reports_page| method that returns each successive page.
:param str search_term: The term to search for. If empty, no search term will be applied. Otherwise, must
be at least 3 characters.
:param list(str) enclave_ids: list of enclave ids used to restrict reports to specific enclaves (optional - by
default reports from all of user's enclaves are returned)
:param int from_time: start of time window in milliseconds since epoch (optional)
:param int to_time: end of time window in milliseconds since epoch (optional)
:param list(str) tags: Name (or list of names) of tag(s) to filter reports by. Only reports containing
ALL of these tags will be returned. (optional)
:param list(str) excluded_tags: Reports containing ANY of these tags will be excluded from the results.
:param int start_page: The page to start on.
:param page_size: The size of each page.
:return: The generator. | entailment |
def search_reports(self, search_term=None,
enclave_ids=None,
from_time=None,
to_time=None,
tags=None,
excluded_tags=None):
"""
Uses the |search_reports_page| method to create a generator that returns each successive report.
:param str search_term: The term to search for. If empty, no search term will be applied. Otherwise, must
be at least 3 characters.
:param list(str) enclave_ids: list of enclave ids used to restrict reports to specific enclaves (optional - by
default reports from all of user's enclaves are returned)
:param int from_time: start of time window in milliseconds since epoch (optional)
:param int to_time: end of time window in milliseconds since epoch (optional)
:param list(str) tags: Name (or list of names) of tag(s) to filter reports by. Only reports containing
ALL of these tags will be returned. (optional)
:param list(str) excluded_tags: Reports containing ANY of these tags will be excluded from the results.
:return: The generator of Report objects. Note that the body attributes of these reports will be ``None``.
"""
return Page.get_generator(page_generator=self._search_reports_page_generator(search_term, enclave_ids,
from_time, to_time, tags,
excluded_tags)) | Uses the |search_reports_page| method to create a generator that returns each successive report.
:param str search_term: The term to search for. If empty, no search term will be applied. Otherwise, must
be at least 3 characters.
:param list(str) enclave_ids: list of enclave ids used to restrict reports to specific enclaves (optional - by
default reports from all of user's enclaves are returned)
:param int from_time: start of time window in milliseconds since epoch (optional)
:param int to_time: end of time window in milliseconds since epoch (optional)
:param list(str) tags: Name (or list of names) of tag(s) to filter reports by. Only reports containing
ALL of these tags will be returned. (optional)
:param list(str) excluded_tags: Reports containing ANY of these tags will be excluded from the results.
:return: The generator of Report objects. Note that the body attributes of these reports will be ``None``. | entailment |
def normalize_timestamp(date_time):
"""
TODO: get rid of this function and all references to it / uses of it.
Attempt to convert a string timestamp in to a TruSTAR compatible format for submission.
Will return current time with UTC time zone if None
:param date_time: int that is seconds or milliseconds since epoch, or string/datetime object containing date, time,
and (ideally) timezone.
Examples of supported timestamp formats: 1487890914, 1487890914000, "2017-02-23T23:01:54", "2017-02-23T23:01:54+0000"
:return If input is an int, will return milliseconds since epoch. Otherwise, will return a normalized isoformat
timestamp.
"""
# if timestamp is null, just return the same null.
if not date_time:
return date_time
datetime_dt = datetime.now()
# get current time in seconds-since-epoch
current_time = int(time.time()) * 1000
try:
# identify type of timestamp and convert to datetime object
if isinstance(date_time, int):
# if timestamp has less than 10 digits, it is in seconds
if date_time < 10000000000:
date_time *= 1000
# if timestamp is incorrectly forward dated, set to current time
if date_time > current_time:
raise ValueError("The given time %s is in the future." % date_time)
return date_time
if isinstance(date_time, str):
datetime_dt = dateutil.parser.parse(date_time)
elif isinstance(date_time, datetime):
datetime_dt = date_time
# if timestamp is none of the formats above, error message is printed and timestamp is set to current time by
# default
except Exception as e:
logger.warning(e)
logger.warning("Using current time as replacement.")
datetime_dt = datetime.now()
# if timestamp is timezone naive, add timezone
if not datetime_dt.tzinfo:
# add system timezone and convert to UTC
datetime_dt = get_localzone().localize(datetime_dt).astimezone(pytz.utc)
# converts datetime to iso8601
return datetime_dt.isoformat() | TODO: get rid of this function and all references to it / uses of it.
Attempt to convert a string timestamp in to a TruSTAR compatible format for submission.
Will return current time with UTC time zone if None
:param date_time: int that is seconds or milliseconds since epoch, or string/datetime object containing date, time,
and (ideally) timezone.
Examples of supported timestamp formats: 1487890914, 1487890914000, "2017-02-23T23:01:54", "2017-02-23T23:01:54+0000"
:return If input is an int, will return milliseconds since epoch. Otherwise, will return a normalized isoformat
timestamp. | entailment |
def parse_boolean(value):
"""
Coerce a value to boolean.
:param value: the value, could be a string, boolean, or None
:return: the value as coerced to a boolean
"""
if value is None:
return None
if isinstance(value, bool):
return value
if isinstance(value, string_types):
value = value.lower()
if value == 'false':
return False
if value == 'true':
return True
raise ValueError("Could not convert value to boolean: {}".format(value)) | Coerce a value to boolean.
:param value: the value, could be a string, boolean, or None
:return: the value as coerced to a boolean | entailment |
def config_from_file(config_file_path, config_role):
"""
Create a configuration dictionary from a config file section. This dictionary is what the TruStar
class constructor ultimately requires.
:param config_file_path: The path to the config file.
:param config_role: The section within the file to use.
:return: The configuration dictionary.
"""
# read config file depending on filetype, parse into dictionary
ext = os.path.splitext(config_file_path)[-1]
if ext in ['.conf', '.ini']:
config_parser = configparser.RawConfigParser()
config_parser.read(config_file_path)
roles = dict(config_parser)
elif ext in ['.json', '.yml', '.yaml']:
with open(config_file_path, 'r') as f:
roles = yaml.safe_load(f)
else:
raise IOError("Unrecognized filetype for config file '%s'" % config_file_path)
# ensure that config file has indicated role
if config_role in roles:
config = dict(roles[config_role])
else:
raise KeyError("Could not find role %s" % config_role)
# parse enclave ids
if 'enclave_ids' in config:
# if id has all numeric characters, will be parsed as an int, so convert to string
if isinstance(config['enclave_ids'], int):
config['enclave_ids'] = str(config['enclave_ids'])
# split comma separated list if necessary
if isinstance(config['enclave_ids'], string_types):
config['enclave_ids'] = config['enclave_ids'].split(',')
elif not isinstance(config['enclave_ids'], list):
raise Exception("'enclave_ids' must be a list or a comma-separated list")
# strip out whitespace
config['enclave_ids'] = [str(x).strip() for x in config['enclave_ids'] if x is not None]
else:
# default to empty list
config['enclave_ids'] = []
return config | Create a configuration dictionary from a config file section. This dictionary is what the TruStar
class constructor ultimately requires.
:param config_file_path: The path to the config file.
:param config_role: The section within the file to use.
:return: The configuration dictionary. | entailment |
def get_version(self):
"""
Get the version number of the API.
Example:
>>> ts.get_version()
1.3
"""
result = self._client.get("version").content
if isinstance(result, bytes):
result = result.decode('utf-8')
return result.strip('\n') | Get the version number of the API.
Example:
>>> ts.get_version()
1.3 | entailment |
def get_user_enclaves(self):
"""
Gets the list of enclaves that the user has access to.
:return: A list of |EnclavePermissions| objects, each representing an enclave and whether the requesting user
has read, create, and update access to it.
"""
resp = self._client.get("enclaves")
return [EnclavePermissions.from_dict(enclave) for enclave in resp.json()] | Gets the list of enclaves that the user has access to.
:return: A list of |EnclavePermissions| objects, each representing an enclave and whether the requesting user
has read, create, and update access to it. | entailment |
def get_request_quotas(self):
"""
Gets the request quotas for the user's company.
:return: A list of |RequestQuota| objects.
"""
resp = self._client.get("request-quotas")
return [RequestQuota.from_dict(quota) for quota in resp.json()] | Gets the request quotas for the user's company.
:return: A list of |RequestQuota| objects. | entailment |
def configure_logging():
"""
Initialize logging configuration to defaults. If the environment variable DISABLE_TRUSTAR_LOGGING is set to true,
this will be ignored.
"""
if not parse_boolean(os.environ.get('DISABLE_TRUSTAR_LOGGING')):
# configure
dictConfig(DEFAULT_LOGGING_CONFIG)
# construct error logger
error_logger = logging.getLogger("error")
# log all uncaught exceptions
def log_exception(exc_type, exc_value, exc_traceback):
error_logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
# register logging function as exception hook
sys.excepthook = log_exception | Initialize logging configuration to defaults. If the environment variable DISABLE_TRUSTAR_LOGGING is set to true,
this will be ignored. | entailment |
def from_dict(cls, enclave):
"""
Create a enclave object from a dictionary.
:param enclave: The dictionary.
:return: The enclave object.
"""
return Enclave(id=enclave.get('id'),
name=enclave.get('name'),
type=EnclaveType.from_string(enclave.get('type'))) | Create a enclave object from a dictionary.
:param enclave: The dictionary.
:return: The enclave object. | entailment |
def to_dict(self, remove_nones=False):
"""
Creates a dictionary representation of the enclave.
:param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``.
:return: A dictionary representation of the enclave.
"""
if remove_nones:
return super().to_dict(remove_nones=True)
return {
'id': self.id,
'name': self.name,
'type': self.type
} | Creates a dictionary representation of the enclave.
:param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``.
:return: A dictionary representation of the enclave. | entailment |
def from_dict(cls, d):
"""
Create a enclave object from a dictionary.
:param d: The dictionary.
:return: The EnclavePermissions object.
"""
enclave = super(cls, EnclavePermissions).from_dict(d)
enclave_permissions = cls.from_enclave(enclave)
enclave_permissions.read = d.get('read')
enclave_permissions.create = d.get('create')
enclave_permissions.update = d.get('update')
return enclave_permissions | Create a enclave object from a dictionary.
:param d: The dictionary.
:return: The EnclavePermissions object. | entailment |
def to_dict(self, remove_nones=False):
"""
Creates a dictionary representation of the enclave.
:param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``.
:return: A dictionary representation of the EnclavePermissions object.
"""
d = super().to_dict(remove_nones=remove_nones)
d.update({
'read': self.read,
'create': self.create,
'update': self.update
})
return d | Creates a dictionary representation of the enclave.
:param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``.
:return: A dictionary representation of the EnclavePermissions object. | entailment |
def from_enclave(cls, enclave):
"""
Create an |EnclavePermissions| object from an |Enclave| object.
:param enclave: the Enclave object
:return: an EnclavePermissions object
"""
return EnclavePermissions(id=enclave.id,
name=enclave.name,
type=enclave.type) | Create an |EnclavePermissions| object from an |Enclave| object.
:param enclave: the Enclave object
:return: an EnclavePermissions object | entailment |
def get_enclave_tags(self, report_id, id_type=None):
"""
Retrieves all enclave tags present in a specific report.
:param report_id: the ID of the report
:param id_type: indicates whether the ID internal or an external ID provided by the user
:return: A list of |Tag| objects.
"""
params = {'idType': id_type}
resp = self._client.get("reports/%s/tags" % report_id, params=params)
return [Tag.from_dict(indicator) for indicator in resp.json()] | Retrieves all enclave tags present in a specific report.
:param report_id: the ID of the report
:param id_type: indicates whether the ID internal or an external ID provided by the user
:return: A list of |Tag| objects. | entailment |
def add_enclave_tag(self, report_id, name, enclave_id, id_type=None):
"""
Adds a tag to a specific report, for a specific enclave.
:param report_id: The ID of the report
:param name: The name of the tag to be added
:param enclave_id: ID of the enclave where the tag will be added
:param id_type: indicates whether the ID internal or an external ID provided by the user
:return: The ID of the tag that was created.
"""
params = {
'idType': id_type,
'name': name,
'enclaveId': enclave_id
}
resp = self._client.post("reports/%s/tags" % report_id, params=params)
return str(resp.content) | Adds a tag to a specific report, for a specific enclave.
:param report_id: The ID of the report
:param name: The name of the tag to be added
:param enclave_id: ID of the enclave where the tag will be added
:param id_type: indicates whether the ID internal or an external ID provided by the user
:return: The ID of the tag that was created. | entailment |
def delete_enclave_tag(self, report_id, tag_id, id_type=None):
"""
Deletes a tag from a specific report, in a specific enclave.
:param string report_id: The ID of the report
:param string tag_id: ID of the tag to delete
:param string id_type: indicates whether the ID internal or an external ID provided by the user
:return: The response body.
"""
params = {
'idType': id_type
}
self._client.delete("reports/%s/tags/%s" % (report_id, tag_id), params=params) | Deletes a tag from a specific report, in a specific enclave.
:param string report_id: The ID of the report
:param string tag_id: ID of the tag to delete
:param string id_type: indicates whether the ID internal or an external ID provided by the user
:return: The response body. | entailment |
def get_all_enclave_tags(self, enclave_ids=None):
"""
Retrieves all tags present in the given enclaves. If the enclave list is empty, the tags returned include all
tags for all enclaves the user has access to.
:param (string) list enclave_ids: list of enclave IDs
:return: The list of |Tag| objects.
"""
params = {'enclaveIds': enclave_ids}
resp = self._client.get("reports/tags", params=params)
return [Tag.from_dict(indicator) for indicator in resp.json()] | Retrieves all tags present in the given enclaves. If the enclave list is empty, the tags returned include all
tags for all enclaves the user has access to.
:param (string) list enclave_ids: list of enclave IDs
:return: The list of |Tag| objects. | entailment |
def add_indicator_tag(self, indicator_value, name, enclave_id):
"""
Adds a tag to a specific indicator, for a specific enclave.
:param indicator_value: The value of the indicator
:param name: The name of the tag to be added
:param enclave_id: ID of the enclave where the tag will be added
:return: A |Tag| object representing the tag that was created.
"""
data = {
'value': indicator_value,
'tag': {
'name': name,
'enclaveId': enclave_id
}
}
resp = self._client.post("indicators/tags", data=json.dumps(data))
return Tag.from_dict(resp.json()) | Adds a tag to a specific indicator, for a specific enclave.
:param indicator_value: The value of the indicator
:param name: The name of the tag to be added
:param enclave_id: ID of the enclave where the tag will be added
:return: A |Tag| object representing the tag that was created. | entailment |
def delete_indicator_tag(self, indicator_value, tag_id):
"""
Deletes a tag from a specific indicator, in a specific enclave.
:param indicator_value: The value of the indicator to delete the tag from
:param tag_id: ID of the tag to delete
"""
params = {
'value': indicator_value
}
self._client.delete("indicators/tags/%s" % tag_id, params=params) | Deletes a tag from a specific indicator, in a specific enclave.
:param indicator_value: The value of the indicator to delete the tag from
:param tag_id: ID of the tag to delete | entailment |
def from_dict(cls, tag):
"""
Create a tag object from a dictionary. This method is intended for internal use, to construct a
:class:`Tag` object from the body of a response json. It expects the keys of the dictionary to match those
of the json that would be found in a response to an API call such as ``GET /enclave-tags``.
:param tag: The dictionary.
:return: The :class:`Tag` object.
"""
return Tag(name=tag.get('name'),
id=tag.get('guid'),
enclave_id=tag.get('enclaveId')) | Create a tag object from a dictionary. This method is intended for internal use, to construct a
:class:`Tag` object from the body of a response json. It expects the keys of the dictionary to match those
of the json that would be found in a response to an API call such as ``GET /enclave-tags``.
:param tag: The dictionary.
:return: The :class:`Tag` object. | entailment |
def to_dict(self, remove_nones=False):
"""
Creates a dictionary representation of the tag.
:param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``.
:return: A dictionary representation of the tag.
"""
if remove_nones:
d = super().to_dict(remove_nones=True)
else:
d = {
'name': self.name,
'id': self.id,
'enclaveId': self.enclave_id
}
return d | Creates a dictionary representation of the tag.
:param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``.
:return: A dictionary representation of the tag. | entailment |
def infer_delimiter(filename, comment_char="#", n_lines=3):
"""
Given a file which contains data separated by one of the following:
- commas
- tabs
- spaces
Return the most likely separator by sniffing the first few lines
of the file's contents.
"""
lines = []
with open(filename, "r") as f:
for line in f:
if line.startswith(comment_char):
continue
if len(lines) < n_lines:
lines.append(line)
else:
break
if len(lines) < n_lines:
raise ValueError(
"Not enough lines in %s to infer delimiter" % filename)
candidate_delimiters = ["\t", ",", "\s+"]
for candidate_delimiter in candidate_delimiters:
counts = [len(re.split(candidate_delimiter, line)) for line in lines]
first_line_count = counts[0]
if all(c == first_line_count for c in counts) and first_line_count > 1:
return candidate_delimiter
raise ValueError("Could not determine delimiter for %s" % filename) | Given a file which contains data separated by one of the following:
- commas
- tabs
- spaces
Return the most likely separator by sniffing the first few lines
of the file's contents. | entailment |
def check_required_columns(df, filename, required_columns):
"""
Ensure that all required columns are present in the given dataframe,
otherwise raise an exception.
"""
available_columns = set(df.columns)
for column_name in required_columns:
if column_name not in available_columns:
raise ValueError("FPKM tracking file %s missing column '%s'" % (
filename,
column_name)) | Ensure that all required columns are present in the given dataframe,
otherwise raise an exception. | entailment |
def topology_mdtraj(traj):
'''Generate topology spec for the MolecularViewer from mdtraj.
:param mdtraj.Trajectory traj: the trajectory
:return: A chemview-compatible dictionary corresponding to the topology defined in mdtraj.
'''
import mdtraj as md
top = {}
top['atom_types'] = [a.element.symbol for a in traj.topology.atoms]
top['atom_names'] = [a.name for a in traj.topology.atoms]
top['bonds'] = [(a.index, b.index) for a, b in traj.topology.bonds]
top['secondary_structure'] = md.compute_dssp(traj[0])[0]
top['residue_types'] = [r.name for r in traj.topology.residues ]
top['residue_indices'] = [ [a.index for a in r.atoms] for r in traj.topology.residues ]
return top | Generate topology spec for the MolecularViewer from mdtraj.
:param mdtraj.Trajectory traj: the trajectory
:return: A chemview-compatible dictionary corresponding to the topology defined in mdtraj. | entailment |
def encode_numpy(array):
'''Encode a numpy array as a base64 encoded string, to be JSON serialized.
:return: a dictionary containing the fields:
- *data*: the base64 string
- *type*: the array type
- *shape*: the array shape
'''
return {'data' : base64.b64encode(array.data).decode('utf8'),
'type' : array.dtype.name,
'shape': array.shape} | Encode a numpy array as a base64 encoded string, to be JSON serialized.
:return: a dictionary containing the fields:
- *data*: the base64 string
- *type*: the array type
- *shape*: the array shape | entailment |
def load_cufflinks_dataframe(
filename,
id_column=ID_COLUMN,
fpkm_column=FPKM_COLUMN,
status_column=STATUS_COLUMN,
locus_column=LOCUS_COLUMN,
gene_names_column=GENE_NAMES_COLUMN,
drop_failed=True,
drop_lowdata=False,
drop_hidata=True,
replace_hidata_fpkm_value=None,
drop_nonchromosomal_loci=False,
drop_novel=False,
sep=None):
"""
Loads a Cufflinks tracking file, which contains expression levels
(in FPKM: Fragments Per Kilobase of transcript per Million fragments)
for transcript isoforms or whole genes. These transcripts/genes may be
previously known (in which case they have an Ensembl ID) or a novel
assembly from the RNA-Seq data (in which case their IDs look like "CUFF.1")
Parameters
----------
filename : str
Filename of tracking file e.g. "genes.tracking_fpkm"
id_column : str, optional
fpkm_column : str, optional
status_column : str, optional
Name of column which indicates the FPKM estimate status. The column
name is typically "FPKM_status". Possible contained within this column
will be OK, FAIL, LOWDATA, HIDATA.
locus_column : str, optional
gene_names_column : str, optional
drop_failed : bool, optional
Drop rows whose FPKM status is "FAIL" (default=True)
drop_lowdata : bool, optional
Drop rows whose FPKM status is "LOWDATA", meaning that Cufflinks thought
there were too few reads to accurately estimate the FPKM (default=False)
drop_hidata : bool, optional
Drop rows whose FPKM status is "HIDATA", meaning that too many
fragments aligned to a feature for Cufflinks to process. Dropping
the most expressed genes seems like a stupid idea so: default=False
replace_hidata_fpkm_value : float, optional
If drop_hidata=False, the HIDATA entries will still have an FPKM=0.0,
this argument lets you replace the FPKM with some known constant.
drop_nonchromosomal_loci : bool, optional
Drop rows whose location isn't on a canonical chromosome
i.e. doesn't start with "chr" (default=False)
drop_novel : bool, optional
Drop genes or isoforms that aren't found in Ensembl (default = False)
sep : str, optional
Separator between data fields in the FPKM tracking file
(default is to infer whether the file uses comma or whitespace)
Returns DataFrame with columns:
id : str
novel : bool
fpkm : float
chr : str
start : int
end : int
gene_names : str list
"""
if sep is None:
sep = infer_delimiter(filename)
df = pd.read_csv(filename, sep=sep, engine="c")
required_columns = {
status_column,
locus_column,
id_column,
gene_names_column,
fpkm_column
}
check_required_columns(df, filename, required_columns)
for flag, status_value in [
(drop_failed, "FAIL"),
(drop_lowdata, "LOWDATA"),
(drop_hidata, "HIDATA")]:
mask = df[status_column] == status_value
mask_count = mask.sum()
total_count = len(df)
if flag and mask_count > 0:
verb_str = "Dropping"
df = df[~mask]
else:
verb_str = "Keeping"
logging.info(
"%s %d/%d entries from %s with status=%s",
verb_str,
mask_count,
total_count,
filename,
status_value)
if drop_nonchromosomal_loci:
loci = df[locus_column]
chromosomal_loci = loci.str.startswith("chr")
n_dropped = (~chromosomal_loci).sum()
if n_dropped > 0:
logging.info("Dropping %d/%d non-chromosomal loci from %s" % (
n_dropped, len(df), filename))
df = df[chromosomal_loci]
if replace_hidata_fpkm_value:
hidata_mask = df[status_column] == "HIDATA"
n_hidata = hidata_mask.sum()
logging.info(
"Setting FPKM=%s for %d/%d entries with status=HIDATA",
replace_hidata_fpkm_value,
n_hidata,
len(df))
df[fpkm_column][hidata_mask] = replace_hidata_fpkm_value
if len(df) == 0:
raise ValueError("Empty FPKM tracking file: %s" % filename)
ids = df[id_column]
known = ids.str.startswith("ENS")
if known.sum() == 0:
raise ValueError("No Ensembl IDs found in %s" % filename)
if drop_novel:
n_dropped = (~known).sum()
if n_dropped > 0:
logging.info(
"Dropping %d/%d novel entries from %s",
n_dropped,
len(df),
filename)
df = df[known]
known = np.ones(len(df), dtype='bool')
loci = df[locus_column]
chromosomes, starts, ends = parse_locus_column(df[locus_column])
# gene names are given either as "-" or a comma separated list
# e.g. "BRAF1,PFAM2"
gene_names_strings = df[gene_names_column].copy()
gene_names_strings[gene_names_strings == "-"] = ""
# split each entry into a list of zero or more strings
gene_names_lists = gene_names_strings.str.split(",")
return pd.DataFrame({
"id": df[id_column],
"novel": ~known,
"fpkm": df[fpkm_column],
"chr": chromosomes,
"start": starts,
"end": ends,
"gene_names": gene_names_lists
}) | Loads a Cufflinks tracking file, which contains expression levels
(in FPKM: Fragments Per Kilobase of transcript per Million fragments)
for transcript isoforms or whole genes. These transcripts/genes may be
previously known (in which case they have an Ensembl ID) or a novel
assembly from the RNA-Seq data (in which case their IDs look like "CUFF.1")
Parameters
----------
filename : str
Filename of tracking file e.g. "genes.tracking_fpkm"
id_column : str, optional
fpkm_column : str, optional
status_column : str, optional
Name of column which indicates the FPKM estimate status. The column
name is typically "FPKM_status". Possible contained within this column
will be OK, FAIL, LOWDATA, HIDATA.
locus_column : str, optional
gene_names_column : str, optional
drop_failed : bool, optional
Drop rows whose FPKM status is "FAIL" (default=True)
drop_lowdata : bool, optional
Drop rows whose FPKM status is "LOWDATA", meaning that Cufflinks thought
there were too few reads to accurately estimate the FPKM (default=False)
drop_hidata : bool, optional
Drop rows whose FPKM status is "HIDATA", meaning that too many
fragments aligned to a feature for Cufflinks to process. Dropping
the most expressed genes seems like a stupid idea so: default=False
replace_hidata_fpkm_value : float, optional
If drop_hidata=False, the HIDATA entries will still have an FPKM=0.0,
this argument lets you replace the FPKM with some known constant.
drop_nonchromosomal_loci : bool, optional
Drop rows whose location isn't on a canonical chromosome
i.e. doesn't start with "chr" (default=False)
drop_novel : bool, optional
Drop genes or isoforms that aren't found in Ensembl (default = False)
sep : str, optional
Separator between data fields in the FPKM tracking file
(default is to infer whether the file uses comma or whitespace)
Returns DataFrame with columns:
id : str
novel : bool
fpkm : float
chr : str
start : int
end : int
gene_names : str list | entailment |
def load_cufflinks_dict(*args, **kwargs):
"""
Returns dictionary mapping feature identifier (either transcript or gene ID)
to a DataFrame row with fields:
id : str
novel : bool
fpkm : float
chr : str
start : int
end : int
gene_names : str list
"""
return {
row.id: row
for (_, row)
in load_cufflinks_dataframe(*args, **kwargs).iterrows()
} | Returns dictionary mapping feature identifier (either transcript or gene ID)
to a DataFrame row with fields:
id : str
novel : bool
fpkm : float
chr : str
start : int
end : int
gene_names : str list | entailment |
def load_cufflinks_fpkm_dict(*args, **kwargs):
"""
Returns dictionary mapping feature identifier (either transcript or gene ID)
to FPKM expression value.
"""
return {
row.id: row.fpkm
for (_, row)
in load_cufflinks_dataframe(*args, **kwargs).iterrows()
} | Returns dictionary mapping feature identifier (either transcript or gene ID)
to FPKM expression value. | entailment |
def enable_notebook(verbose=0):
"""Enable IPython notebook widgets to be displayed.
This function should be called before using the chemview widgets.
"""
libs = ['objexporter.js',
'ArcballControls.js', 'filesaver.js',
'base64-arraybuffer.js', 'context.js',
'chemview.js', 'three.min.js', 'jquery-ui.min.js',
'context.standalone.css', 'chemview_widget.js',
'trajectory_controls_widget.js', "layout_widget.js",
"components/jquery-fullscreen/jquery.fullscreen.js",
'scales.js']
fns = [resource_filename('chemview', os.path.join('static', f)) for f in libs]
[install_nbextension(fn, verbose=verbose, overwrite=True, user=True) for fn in fns] | Enable IPython notebook widgets to be displayed.
This function should be called before using the chemview widgets. | entailment |
def extract_pdf(file_name):
"""
Extract text from a pdf file
:param file_name path to pdf to read
:return text from pdf
"""
rsrcmgr = pdfminer.pdfinterp.PDFResourceManager()
sio = StringIO()
laparams = LAParams()
device = TextConverter(rsrcmgr, sio, codec='utf-8', laparams=laparams)
interpreter = pdfminer.pdfinterp.PDFPageInterpreter(rsrcmgr, device)
# Extract text from pdf file
with open(file_name, 'rb') as fp:
for page in PDFPage.get_pages(fp, maxpages=20):
interpreter.process_page(page)
text = sio.getvalue()
# Cleanup
device.close()
sio.close()
return text | Extract text from a pdf file
:param file_name path to pdf to read
:return text from pdf | entailment |
def process_file(source_file):
"""
Extract text from a file (pdf, txt, eml, csv, json)
:param source_file path to file to read
:return text from file
"""
if source_file.endswith(('.pdf', '.PDF')):
txt = extract_pdf(source_file)
elif source_file.endswith(('.txt', '.eml', '.csv', '.json')):
with open(source_file, 'r') as f:
txt = f.read()
else:
logger.info("Unsupported file extension for file {}".format(source_file))
return ""
return txt | Extract text from a file (pdf, txt, eml, csv, json)
:param source_file path to file to read
:return text from file | entailment |
def from_string(cls, string):
"""
Simply logs a warning if the desired enum value is not found.
:param string:
:return:
"""
# find enum value
for attr in dir(cls):
value = getattr(cls, attr)
if value == string:
return value
# if not found, log warning and return the value passed in
logger.warning("{} is not a valid enum value for {}.".format(string, cls.__name__))
return string | Simply logs a warning if the desired enum value is not found.
:param string:
:return: | entailment |
def validate(self, value):
"""
Validate (and possibly typecast) the given parameter value value.
:param value: Parameter value
:return: Typecast parameter value
:raises ValidationErrors: if there were validation errors
"""
errors = []
value = self._validate_type(value, errors)
self._validate_value(value, errors)
if errors:
raise ValidationErrors(errors)
return value | Validate (and possibly typecast) the given parameter value value.
:param value: Parameter value
:return: Typecast parameter value
:raises ValidationErrors: if there were validation errors | entailment |
def format_cli(self, value):
"""
Build a single parameter argument.
:return: list of CLI strings -- not escaped. If the parameter should not be expressed, returns None.
:rtype: list[str]|None
"""
if value is None or (self.type == 'flag' and not value):
return None
pass_as_bits = text_type(self.pass_as or self.default_pass_as).split()
env = dict(name=self.name, value=value, v=value)
return [bit.format(**env) for bit in pass_as_bits] | Build a single parameter argument.
:return: list of CLI strings -- not escaped. If the parameter should not be expressed, returns None.
:rtype: list[str]|None | entailment |
def listify(value):
"""
Wrap the given value into a list, with the below provisions:
* If the value is a list or a tuple, it's coerced into a new list.
* If the value is None, an empty list is returned.
* Otherwise, a single-element list is returned, containing the value.
:param value: A value.
:return: a list!
:rtype: list
"""
if value is None:
return []
if isinstance(value, (list, tuple)):
return list(value)
return [value] | Wrap the given value into a list, with the below provisions:
* If the value is a list or a tuple, it's coerced into a new list.
* If the value is None, an empty list is returned.
* Otherwise, a single-element list is returned, containing the value.
:param value: A value.
:return: a list!
:rtype: list | entailment |
def build_command(command, parameter_map):
"""
Build command line(s) using the given parameter map.
Even if passed a single `command`, this function will return a list
of shell commands. It is the caller's responsibility to concatenate them,
likely using the semicolon or double ampersands.
:param command: The command to interpolate params into.
:type command: str|list[str]
:param parameter_map: A ParameterMap object containing parameter knowledge.
:type parameter_map: valohai_yaml.objs.parameter_map.ParameterMap
:return: list of commands
:rtype: list[str]
"""
if isinstance(parameter_map, list): # Partially emulate old (pre-0.7) API for this function.
parameter_map = LegacyParameterMap(parameter_map)
out_commands = []
for command in listify(command):
# Only attempt formatting if the string smells like it should be formatted.
# This allows the user to include shell syntax in the commands, if required.
# (There's still naturally the chance for false-positives, so guard against
# those value errors and warn about them.)
if interpolable_re.search(command):
try:
command = interpolable_re.sub(
lambda match: _replace_interpolation(parameter_map, match),
command,
)
except ValueError as exc: # pragma: no cover
warnings.warn(
'failed to interpolate into %r: %s' % (command, exc),
CommandInterpolationWarning
)
out_commands.append(command.strip())
return out_commands | Build command line(s) using the given parameter map.
Even if passed a single `command`, this function will return a list
of shell commands. It is the caller's responsibility to concatenate them,
likely using the semicolon or double ampersands.
:param command: The command to interpolate params into.
:type command: str|list[str]
:param parameter_map: A ParameterMap object containing parameter knowledge.
:type parameter_map: valohai_yaml.objs.parameter_map.ParameterMap
:return: list of commands
:rtype: list[str] | entailment |
def validate(yaml, raise_exc=True):
"""
Validate the given YAML document and return a list of errors.
:param yaml: YAML data (either a string, a stream, or pre-parsed Python dict/list)
:type yaml: list|dict|str|file
:param raise_exc: Whether to raise a meta-exception containing all discovered errors after validation.
:type raise_exc: bool
:return: A list of errors encountered.
:rtype: list[jsonschema.exceptions.ValidationError]
"""
data = read_yaml(yaml)
validator = get_validator()
# Nb: this uses a list instead of being a generator function in order to be
# easier to call correctly. (Were it a generator function, a plain
# `validate(..., raise_exc=True)` would not do anything.)
errors = list(validator.iter_errors(data))
if errors and raise_exc:
raise ValidationErrors(errors)
return errors | Validate the given YAML document and return a list of errors.
:param yaml: YAML data (either a string, a stream, or pre-parsed Python dict/list)
:type yaml: list|dict|str|file
:param raise_exc: Whether to raise a meta-exception containing all discovered errors after validation.
:type raise_exc: bool
:return: A list of errors encountered.
:rtype: list[jsonschema.exceptions.ValidationError] | entailment |
def build_parameters(self):
"""
Build the CLI command line from the parameter values.
:return: list of CLI strings -- not escaped!
:rtype: list[str]
"""
param_bits = []
for name in self.parameters:
param_bits.extend(self.build_parameter_by_name(name) or [])
return param_bits | Build the CLI command line from the parameter values.
:return: list of CLI strings -- not escaped!
:rtype: list[str] | entailment |
def parse(cls, data):
"""
Parse a Config structure out of a Python dict (that's likely deserialized from YAML).
:param data: Config-y dict
:type data: dict
:return: Config object
:rtype: valohai_yaml.objs.Config
"""
parsers = {
'step': ([], Step.parse),
'endpoint': ([], Endpoint.parse),
}
for datum in data:
assert isinstance(datum, dict)
for type, (items, parse) in parsers.items():
if type in datum:
items.append(parse(datum[type]))
break
else:
raise ValueError('No parser for {0}'.format(datum))
inst = cls(
steps=parsers['step'][0],
endpoints=parsers['endpoint'][0],
)
inst._original_data = data
return inst | Parse a Config structure out of a Python dict (that's likely deserialized from YAML).
:param data: Config-y dict
:type data: dict
:return: Config object
:rtype: valohai_yaml.objs.Config | entailment |
def get_step_by(self, **kwargs):
"""
Get the first step that matches all the passed named arguments.
Has special argument index not present in the real step.
Usage:
config.get_step_by(name='not found')
config.get_step_by(index=0)
config.get_step_by(name="greeting", command='echo HELLO MORDOR')
:param kwargs:
:return: Step object or None
:rtype: valohai_yaml.objs.Step|None
"""
if not kwargs:
return None
for index, step in enumerate(self.steps.values()):
extended_step = dict(step.serialize(), index=index)
# check if kwargs is a subset of extended_step
if all(item in extended_step.items() for item in kwargs.items()):
return step
return None | Get the first step that matches all the passed named arguments.
Has special argument index not present in the real step.
Usage:
config.get_step_by(name='not found')
config.get_step_by(index=0)
config.get_step_by(name="greeting", command='echo HELLO MORDOR')
:param kwargs:
:return: Step object or None
:rtype: valohai_yaml.objs.Step|None | entailment |
def parse(yaml, validate=True):
"""
Parse the given YAML data into a `Config` object, optionally validating it first.
:param yaml: YAML data (either a string, a stream, or pre-parsed Python dict/list)
:type yaml: list|dict|str|file
:param validate: Whether to validate the data before attempting to parse it.
:type validate: bool
:return: Config object
:rtype: valohai_yaml.objs.Config
"""
data = read_yaml(yaml)
if validate: # pragma: no branch
from .validation import validate
validate(data, raise_exc=True)
return Config.parse(data) | Parse the given YAML data into a `Config` object, optionally validating it first.
:param yaml: YAML data (either a string, a stream, or pre-parsed Python dict/list)
:type yaml: list|dict|str|file
:param validate: Whether to validate the data before attempting to parse it.
:type validate: bool
:return: Config object
:rtype: valohai_yaml.objs.Config | entailment |
def get_parameter_defaults(self, include_flags=True):
"""
Get a dict mapping parameter names to their defaults (if set).
:rtype: dict[str, object]
"""
return {
name: parameter.default
for (name, parameter)
in self.parameters.items()
if parameter.default is not None and (include_flags or parameter.type != 'flag')
} | Get a dict mapping parameter names to their defaults (if set).
:rtype: dict[str, object] | entailment |
def build_command(self, parameter_values, command=None):
"""
Build the command for this step using the given parameter values.
Even if the original configuration only declared a single `command`,
this function will return a list of shell commands. It is the caller's
responsibility to concatenate them, likely using the semicolon or
double ampersands.
It is also possible to override the `command`.
:param parameter_values: Parameter values to augment any parameter defaults.
:type parameter_values: dict[str, object]
:param command: Overriding command; leave falsy to not override.
:type command: str|list[str]|None
:return: list of commands
:rtype: list[str]
"""
command = (command or self.command)
# merge defaults with passed values
# ignore flag default values as they are special
# undefined flag will remain undefined regardless of default value
values = dict(self.get_parameter_defaults(include_flags=False), **parameter_values)
parameter_map = ParameterMap(parameters=self.parameters, values=values)
return build_command(command, parameter_map) | Build the command for this step using the given parameter values.
Even if the original configuration only declared a single `command`,
this function will return a list of shell commands. It is the caller's
responsibility to concatenate them, likely using the semicolon or
double ampersands.
It is also possible to override the `command`.
:param parameter_values: Parameter values to augment any parameter defaults.
:type parameter_values: dict[str, object]
:param command: Overriding command; leave falsy to not override.
:type command: str|list[str]|None
:return: list of commands
:rtype: list[str] | entailment |
def lint_file(file_path):
"""
Validate & lint `file_path` and return a LintResult.
:param file_path: YAML filename
:type file_path: str
:return: LintResult object
"""
with open(file_path, 'r') as yaml:
try:
return lint(yaml)
except Exception as e:
lr = LintResult()
lr.add_error('could not parse YAML: %s' % e, exception=e)
return lr | Validate & lint `file_path` and return a LintResult.
:param file_path: YAML filename
:type file_path: str
:return: LintResult object | entailment |
def update(self):
"""Actual update process goes here using auxialary ``get_currencies``
and ``get_exchangerates`` methods. This method creates or updates
corresponding ``Currency`` and ``ExchangeRate`` models
"""
currencies = self.get_currencies()
currency_objects = {}
for code, name in currencies:
currency_objects[code], created = Currency.objects.get_or_create(
code=code, defaults={'name': name})
if created:
logger.info('currency: %s created', code)
existing = ExchangeRate.objects.values('source__code',
'target__code',
'id')
existing = {(d['source__code'], d['target__code']): d['id']
for d in existing}
usd_exchange_rates = dict(self.get_exchangerates('USD'))
updates = []
inserts = []
for source in currencies:
for target in currencies:
rate = self._get_rate_through_usd(source.code,
target.code,
usd_exchange_rates)
exchange_rate = ExchangeRate(source=currency_objects[source.code],
target=currency_objects[target.code],
rate=rate)
if (source.code, target.code) in existing:
exchange_rate.id = existing[(source.code, target.code)]
updates.append(exchange_rate)
logger.debug('exchange rate updated %s/%s=%s'
% (source, target, rate))
else:
inserts.append(exchange_rate)
logger.debug('exchange rate created %s/%s=%s'
% (source, target, rate))
logger.info('exchange rates updated for %s' % source.code)
logger.info("Updating %s rows" % len(updates))
update_many(updates)
logger.info("Inserting %s rows" % len(inserts))
insert_many(inserts)
logger.info('saved rates to db') | Actual update process goes here using auxiliary ``get_currencies``
and ``get_exchangerates`` methods. This method creates or updates
corresponding ``Currency`` and ``ExchangeRate`` models | entailment |
def _re_flatten(p):
''' Turn all capturing groups in a regular expression pattern into
non-capturing groups. '''
if '(' not in p: return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))',
lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p) | Turn all capturing groups in a regular expression pattern into
non-capturing groups. | entailment |
def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if not code:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
res = response.copy(cls=HTTPResponse)
res.status = code
res.body = ""
res.set_header('Location', urljoin(request.url, url))
raise res | Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. | entailment |
def _file_iter_range(fp, offset, bytes, maxread=1024*1024):
''' Yield chunks from a range in a file. No chunk is bigger than maxread.'''
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part | Yield chunks from a range in a file. No chunk is bigger than maxread. | entailment |
def static_file(filename, root, mimetype='auto', download=False, charset='UTF-8'):
""" Open a file in a safe way and return :exc:`HTTPResponse` with status
code 200, 305, 403 or 404. The ``Content-Type``, ``Content-Encoding``,
``Content-Length`` and ``Last-Modified`` headers are set if possible.
Special support for ``If-Modified-Since``, ``Range`` and ``HEAD``
requests.
:param filename: Name or path of the file to send.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Defines the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset to use for files with a ``text/*``
mime-type. (default: UTF-8)
"""
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
headers = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype == 'auto':
mimetype, encoding = mimetypes.guess_type(filename)
if encoding: headers['Content-Encoding'] = encoding
if mimetype:
if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype:
mimetype += '; charset=%s' % charset
headers['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download == True else download)
headers['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
headers['Content-Length'] = clen = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
headers['Last-Modified'] = lm
ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
return HTTPResponse(status=304, **headers)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
headers["Accept-Ranges"] = "bytes"
ranges = request.environ.get('HTTP_RANGE')
if 'HTTP_RANGE' in request.environ:
ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end-1, clen)
headers["Content-Length"] = str(end-offset)
if body: body = _file_iter_range(body, offset, end-offset)
return HTTPResponse(body, status=206, **headers)
return HTTPResponse(body, **headers) | Open a file in a safe way and return :exc:`HTTPResponse` with status
code 200, 305, 403 or 404. The ``Content-Type``, ``Content-Encoding``,
``Content-Length`` and ``Last-Modified`` headers are set if possible.
Special support for ``If-Modified-Since``, ``Range`` and ``HEAD``
requests.
:param filename: Name or path of the file to send.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Defines the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset to use for files with a ``text/*``
mime-type. (default: UTF-8) | entailment |
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
if mode: warnings.simplefilter('default')
DEBUG = bool(mode) | Change the debug level.
There is only one debug level supported at the moment. | entailment |
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':',1)
return user, pwd
except (KeyError, ValueError):
return None | Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None | entailment |
def parse_range_header(header, maxlen=0):
''' Yield (start, end) ranges parsed from a HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive.'''
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen-int(end)), maxlen
elif not end: # bytes=100- -> all but the first 99 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end)+1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass | Yield (start, end) ranges parsed from a HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive. | entailment |
def auth_basic(check, realm="private", text="Access denied"):
''' Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. '''
def decorator(func):
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
err = HTTPError(401, text)
err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
return err
return func(*a, **ka)
return wrapper
return decorator | Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. | entailment |
def view(tpl_name, **defaults):
''' Decorator: renders a template for a handler.
The handler can control its behavior like that:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
'''
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, defaults)
return result
return wrapper
return decorator | Decorator: renders a template for a handler.
The handler can control its behavior like that:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters. | entailment |
def add(self, rule, method, target, name=None):
''' Add a new rule or replace the target for an existing rule. '''
anons = 0 # Number of anonymous wildcards found
keys = [] # Names of keys
pattern = '' # Regular expression pattern with named groups
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self._itertokens(rule):
if mode:
is_static = False
if mode == 'default': mode = self.default_filter
mask, in_filter, out_filter = self.filters[mode](conf)
if not key:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons
anons += 1
else:
pattern += '(?P<%s>%s)' % (key, mask)
keys.append(key)
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static.setdefault(method, {})
self.static[method][self.build(rule)] = (target, None)
return
try:
re_pattern = re.compile('^(%s)$' % pattern)
re_match = re_pattern.match
except re.error:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e()))
if filters:
def getargs(path):
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
elif re_pattern.groupindex:
def getargs(path):
return re_match(path).groupdict()
else:
getargs = None
flatpat = _re_flatten(pattern)
whole_rule = (rule, flatpat, target, getargs)
if (flatpat, method) in self._groups:
if DEBUG:
msg = 'Route <%s %s> overwrites a previously defined route'
warnings.warn(msg % (method, rule), RuntimeWarning)
self.dyna_routes[method][self._groups[flatpat, method]] = whole_rule
else:
self.dyna_routes.setdefault(method, []).append(whole_rule)
self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
self._compile(method) | Add a new rule or replace the target for an existing rule. | entailment |
def build(self, _name, *anons, **query):
''' Build an URL by filling the wildcards in a rule. '''
builder = self.builder.get(_name)
if not builder: raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons): query['anon%d'%i] = value
url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder])
return url if not query else url+'?'+urlencode(query)
except KeyError:
raise RouteBuildError('Missing URL argument: %r' % _e().args[0]) | Build an URL by filling the wildcards in a rule. | entailment |
def match(self, environ):
''' Return a (target, url_args) tuple or raise HTTPError(400/404/405). '''
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
target = None
if verb == 'HEAD':
methods = ['PROXY', verb, 'GET', 'ANY']
else:
methods = ['PROXY', verb, 'ANY']
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
allowed.add(verb)
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
raise HTTPError(404, "Not found: " + repr(path)) | Return a (target, url_agrs) tuple or raise HTTPError(400/404/405). | entailment |
def get_undecorated_callback(self):
''' Return the callback. If the callback is a decorated function, try to
recover the original function. '''
func = self.callback
func = getattr(func, '__func__' if py3k else 'im_func', func)
closure_attr = '__closure__' if py3k else 'func_closure'
while hasattr(func, closure_attr) and getattr(func, closure_attr):
func = getattr(func, closure_attr)[0].cell_contents
return func | Return the callback. If the callback is a decorated function, try to
recover the original function. | entailment |
def get_config(self, key, default=None):
''' Lookup a config field and return its value, first checking the
route.config, then route.app.config.'''
for conf in (self.config, self.app.conifg):
if key in conf: return conf[key]
return default | Lookup a config field and return its value, first checking the
route.config, then route.app.config. | entailment |
def add_hook(self, name, func):
''' Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
'''
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func) | Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called. | entailment |
def remove_hook(self, name, func):
''' Remove a callback from a hook. '''
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True | Remove a callback from a hook. | entailment |
def trigger_hook(self, __name, *args, **kwargs):
''' Trigger a hook and return a list of results. '''
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]] | Trigger a hook and return a list of results. | entailment |
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details."""
def decorator(func):
self.add_hook(name, func)
return func
return decorator | Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details. | entailment |
def mount(self, prefix, app, **options):
''' Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call.
'''
if isinstance(app, basestring):
depr('Parameter order of Bottle.mount() changed.', True) # 0.10
segments = [p for p in prefix.split('/') if p]
if not segments: raise ValueError('Empty path prefix.')
path_depth = len(segments)
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = HTTPResponse([])
def start_response(status, headerlist, exc_info=None):
if exc_info:
try:
_raise(*exc_info)
finally:
exc_info = None
rs.status = status
for name, value in headerlist: rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
if body and rs.body: body = itertools.chain(rs.body, body)
rs.body = body or rs.body
return rs
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'PROXY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options) | Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call. | entailment |
def merge(self, routes):
''' Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. '''
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route) | Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. | entailment |
def reset(self, route=None):
''' Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. '''
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes: route.reset()
if DEBUG:
for route in routes: route.prepare()
self.trigger_hook('app_reset') | Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. | entailment |
def add_route(self, route):
''' Add a route object, but do not change the :data:`Route.app`
attribute.'''
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare() | Add a route object, but do not change the :data:`Route.app`
attribute. | entailment |
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
if 'Content-Length' not in response:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
if 'Content-Length' not in response:
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status_code, self.default_error_handler)(out)
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.body)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
iout = iter(out)
first = next(iout)
while not first:
first = next(iout)
except StopIteration:
return self._cast('')
except HTTPResponse:
first = _e()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', _e(), format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
elif isinstance(first, bytes):
new_iter = itertools.chain([first], iout)
elif isinstance(first, unicode):
encoder = lambda x: x.encode(response.charset)
new_iter = imap(encoder, itertools.chain([first], iout))
else:
msg = 'Unsupported response type: %s' % type(first)
return self._cast(HTTPError(500, msg))
if hasattr(out, 'close'):
new_iter = _closeiter(new_iter, out.close)
return new_iter | Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes | entailment |
def wsgi(self, environ, start_response):
        """ The bottle WSGI-interface. Handles a single request and returns
            the response body as an iterable of byte strings. """
        try:
            out = self._cast(self._handle(environ))
            # rfc2616 section 4.3
            # Responses with these status codes, and any reply to a HEAD
            # request, must not carry a message body.
            if response._status_code in (100, 101, 204, 304)\
            or environ['REQUEST_METHOD'] == 'HEAD':
                if hasattr(out, 'close'): out.close()
                out = []
            start_response(response._status_line, response.headerlist)
            return out
        except (KeyboardInterrupt, SystemExit, MemoryError):
            # Never mask interpreter shutdown or fatal conditions.
            raise
        except Exception:
            # Last-resort error page: only reached when error handling or the
            # response machinery itself failed.
            if not self.catchall: raise
            err = '<h1>Critical error while processing request: %s</h1>' \
                  % html_escape(environ.get('PATH_INFO', '/'))
            if DEBUG:
                # Expose exception details and traceback in debug mode only.
                err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
                       '<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
                       % (html_escape(repr(_e())), html_escape(format_exc()))
            environ['wsgi.errors'].write(err)
            headers = [('Content-Type', 'text/html; charset=UTF-8')]
            # Passing exc_info lets the server replace headers if the
            # response has already started (PEP 333 error handling).
            start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
            return [tob(err)] | The bottle WSGI-interface. | entailment |
def forms(self):
        """ Form values decoded from an `url-encoded` or `multipart/form-data`
            POST or PUT request body, returned as a :class:`FormsDict` of
            strings. Uploaded files are not included here; they are exposed
            through :attr:`files` instead. """
        result = FormsDict()
        for key, value in self.POST.allitems():
            if isinstance(value, FileUpload):
                continue  # uploads belong in .files, not .forms
            result[key] = value
        return result
encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. | entailment |
def params(self):
        """ A single :class:`FormsDict` holding the values of both
            :attr:`query` and :attr:`forms`. File uploads are kept in
            :attr:`files`. """
        merged = FormsDict()
        # Query-string values are added first, form values second, preserving
        # the original insertion order of each source.
        for source in (self.query, self.forms):
            for key, value in source.allitems():
                merged[key] = value
        return merged
:attr:`forms`. File uploads are stored in :attr:`files`. | entailment |
def files(self):
        """ Uploaded files parsed from a `multipart/form-data` encoded POST
            or PUT request body, as a :class:`FormsDict` whose values are
            :class:`FileUpload` instances. """
        uploads = FormsDict()
        for key, value in self.POST.allitems():
            if not isinstance(value, FileUpload):
                continue  # plain form fields belong in .forms
            uploads[key] = value
        return uploads
request body. The values are instances of :class:`FileUpload`. | entailment |
def json(self):
        ''' The request body parsed as JSON, or ``None`` when the
            ``Content-Type`` header is not ``application/json`` or the body
            is empty. Only bodies up to :attr:`MEMFILE_MAX` bytes are
            processed, to avoid memory exhaustion. '''
        mime = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0]
        if mime != 'application/json':
            return None
        raw = self._get_body_string()
        return json_loads(raw) if raw else None
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. | entailment |
def POST(self):
        """ The values of :attr:`forms` and :attr:`files` combined into a single
            :class:`FormsDict`. Values are either strings (form values) or
            instances of :class:`cgi.FieldStorage` (file uploads).
        """
        post = FormsDict()
        # We default to application/x-www-form-urlencoded for everything that
        # is not multipart and take the fast path (also: 3.1 workaround)
        if not self.content_type.startswith('multipart/'):
            # latin1 maps bytes one-to-one, so decoding here loses nothing.
            pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
            for key, value in pairs:
                post[key] = value
            return post
        safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
        for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
            if key in self.environ: safe_env[key] = self.environ[key]
        args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
        if py31:
            # Python 3.1 cgi needs a text-mode stream without newline
            # translation.
            args['fp'] = NCTextIOWrapper(args['fp'], encoding='utf8',
                                         newline='\n')
        elif py3k:
            args['encoding'] = 'utf8'
        data = cgi.FieldStorage(**args)
        # Keep a reference on the request; see the linked CPython issue.
        self['_cgi.FieldStorage'] = data #http://bugs.python.org/issue18394#msg207958
        data = data.list or []
        for item in data:
            if item.filename:
                # The part carries a filename -> treat it as a file upload.
                post[item.name] = FileUpload(item.file, item.name,
                                             item.filename, item.headers)
            else:
                post[item.name] = item.value
        return post | The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads). | entailment |
def urlparts(self):
        ''' The request :attr:`url` broken into an
            :class:`urlparse.SplitResult` tuple of (scheme, host, path,
            query_string, fragment). The fragment slot is always empty
            because clients never send it to the server. '''
        env = self.environ
        scheme = env.get('HTTP_X_FORWARDED_PROTO') or env.get('wsgi.url_scheme', 'http')
        host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
        if not host:
            # HTTP 1.1 requires a Host-header; fall back for HTTP/1.0 clients.
            host = env.get('SERVER_NAME', '127.0.0.1')
            port = env.get('SERVER_PORT')
            if port and port != ('80' if scheme == 'http' else '443'):
                host = '%s:%s' % (host, port)
        return UrlSplitResult(scheme, host, urlquote(self.fullpath),
                              env.get('QUERY_STRING'), '')
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. | entailment |
def copy(self, cls=None):
        ''' Return a copy of this response, optionally as an instance of a
            different :class:`BaseResponse` subclass given via *cls*. '''
        target = cls or BaseResponse
        assert issubclass(target, BaseResponse)
        clone = target()
        clone.status = self.status
        # Header value lists are copied so the clone can be mutated freely.
        clone._headers = {key: values[:] for key, values in self._headers.items()}
        if self._cookies:
            clone._cookies = SimpleCookie()
            clone._cookies.load(self._cookies.output(header=''))
        return clone
def set_header(self, name, value):
        ''' Set a response header, discarding any values previously stored
            under the same name. '''
        key = _hkey(name)
        self._headers[key] = [str(value)]
headers with the same name. | entailment |
def headerlist(self):
        ''' The response headers as a WSGI conform list of (name, value)
            tuples. '''
        pairs = list(self._headers.items())
        if 'Content-Type' not in self._headers:
            # Guarantee a Content-Type header on every response.
            pairs.append(('Content-Type', [self.default_content_type]))
        if self._status_code in self.bad_headers:
            # Some status codes forbid certain headers; strip them here.
            forbidden = self.bad_headers[self._status_code]
            pairs = [pair for pair in pairs if pair[0] not in forbidden]
        wsgi_headers = [(name, value) for name, values in pairs
                        for value in values]
        if self._cookies:
            for cookie in self._cookies.values():
                wsgi_headers.append(('Set-Cookie', cookie.OutputString()))
        return wsgi_headers
def charset(self, default='UTF-8'):
        """ The charset declared in the Content-Type header, or *default*
            (``'UTF-8'``) when no charset parameter is present. """
        ctype = self.content_type
        if 'charset=' not in ctype:
            return default
        return ctype.split('charset=')[-1].split(';')[0].strip()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.