sentence1
stringlengths 52
3.87M
| sentence2
stringlengths 1
47.2k
| label
stringclasses 1
value |
|---|---|---|
def release(self, id):
    """Fetch a single release by its numeric id.

    :param int id: (required), id of release
    :returns: :class:`Release <github3.repos.release.Release>`
    """
    if int(id) <= 0:
        return None
    url = self._build_url('releases', str(id), base_url=self._api)
    data = self._json(self._get(url), 200)
    if not data:
        return None
    return Release(data, self)
|
Get a single release.
:param int id: (required), id of release
:returns: :class:`Release <github3.repos.release.Release>`
|
entailment
|
def remove_collaborator(self, login):
    """Remove collaborator ``login`` from the repository.

    :param str login: (required), login name of the collaborator
    :returns: bool
    """
    if not login:
        return False
    url = self._build_url('collaborators', login, base_url=self._api)
    return self._boolean(self._delete(url), 204, 404)
|
Remove collaborator ``login`` from the repository.
:param str login: (required), login name of the collaborator
:returns: bool
|
entailment
|
def tag(self, sha):
    """Get an annotated tag.

    http://learn.github.com/p/tagging.html

    :param str sha: (required), sha of the object for this tag
    :returns: :class:`Tag <github3.git.Tag>`
    """
    if not sha:
        return None
    url = self._build_url('git', 'tags', sha, base_url=self._api)
    data = self._json(self._get(url), 200)
    return Tag(data) if data else None
|
Get an annotated tag.
http://learn.github.com/p/tagging.html
:param str sha: (required), sha of the object for this tag
:returns: :class:`Tag <github3.git.Tag>`
|
entailment
|
def tree(self, sha):
    """Get a tree.

    :param str sha: (required), sha of the object for this tree
    :returns: :class:`Tree <github3.git.Tree>`
    """
    if not sha:
        return None
    url = self._build_url('git', 'trees', sha, base_url=self._api)
    data = self._json(self._get(url), 200)
    return Tree(data, self) if data else None
|
Get a tree.
:param str sha: (required), sha of the object for this tree
:returns: :class:`Tree <github3.git.Tree>`
|
entailment
|
def update_label(self, name, color, new_name=''):
    """Update the label ``name``.

    :param str name: (required), name of the label
    :param str color: (required), color code
    :param str new_name: (optional), new name of the label
    :returns: bool
    """
    label = self.label(name)
    if not label:
        return False
    # When a new name is supplied, rename the label; otherwise only the
    # color changes.
    if new_name:
        return label.update(new_name, color)
    return label.update(name, color)
|
Update the label ``name``.
:param str name: (required), name of the label
:param str color: (required), color code
:param str new_name: (optional), new name of the label
:returns: bool
|
entailment
|
def weekly_commit_count(self):
    """Returns the total commit counts.

    The dictionary returned has two entries: ``all`` and ``owner``. Each
    has a fifty-two element long list of commit counts. (Note: ``all``
    includes the owner.) ``d['all'][0]`` will be the oldest week,
    ``d['all'][51]`` will be the most recent.

    :returns: dict

    .. note:: All statistics methods may return a 202. If github3.py
        receives a 202 in this case, it will return an empty dictionary.
        You should give the API a moment to compose the data and then
        re-request it via this method.

    .. versionadded:: 0.7
    """
    # NOTE(review): this hits the 'participation' stats endpoint even though
    # the name and docstring describe weekly commit counts -- confirm intended.
    url = self._build_url('stats', 'participation', base_url=self._api)
    resp = self._get(url)
    # GitHub answers 202 while it is still computing the statistics.
    if resp.status_code == 202:
        return {}
    json = self._json(resp, 200)
    # Strip caching metadata that _json folds into the payload.
    if json.get('ETag'):
        del json['ETag']
    if json.get('Last-Modified'):
        del json['Last-Modified']
    return json
|
Returns the total commit counts.
The dictionary returned has two entries: ``all`` and ``owner``. Each
has a fifty-two element long list of commit counts. (Note: ``all``
includes the owner.) ``d['all'][0]`` will be the oldest week,
``d['all'][51]`` will be the most recent.
:returns: dict
.. note:: All statistics methods may return a 202. If github3.py
receives a 202 in this case, it will return an empty dictionary.
You should give the API a moment to compose the data and then re
-request it via this method.
..versionadded:: 0.7
|
entailment
|
def rename(args):
    """Supply two names: Existing instance name or ID, and new name to assign to the instance."""
    old_name, new_name = args.names
    instance = resources.ec2.Instance(resolve_instance_id(old_name))
    # Renaming is implemented as (re)tagging the instance's Name tag.
    add_tags(instance, Name=new_name, dry_run=args.dry_run)
|
Supply two names: Existing instance name or ID, and new name to assign to the instance.
|
entailment
|
def create_status(self, state, target_url='', description=''):
    """Create a new deployment status for this deployment.

    :param str state: (required), one of ``pending``, ``success``,
        ``error``, or ``failure``.
    :param str target_url: (optional), URL to associate with this status,
        e.g. live output or historical information. Default: ''.
    :param str description: (optional), short description of the status.
        Default: ''.
    :return: partial :class:`DeploymentStatus <DeploymentStatus>`
    """
    # Reject anything but the states the API accepts.
    if state not in ('pending', 'success', 'error', 'failure'):
        return None
    payload = {'state': state, 'target_url': target_url,
               'description': description}
    response = self._post(self.statuses_url, data=payload,
                          headers=Deployment.CUSTOM_HEADERS)
    data = self._json(response, 201)
    return DeploymentStatus(data, self) if data else None
|
Create a new deployment status for this deployment.
:param str state: (required), The state of the status. Can be one of
``pending``, ``success``, ``error``, or ``failure``.
:param str target_url: The target URL to associate with this status.
This URL should contain output to keep the user updated while the
task is running or serve as historical information for what
happened in the deployment. Default: ''.
:param str description: A short description of the status. Default: ''.
:return: partial :class:`DeploymentStatus <DeploymentStatus>`
|
entailment
|
def iter_statuses(self, number=-1, etag=None):
    """Iterate over the deployment statuses for this deployment.

    :param int number: (optional), the number of statuses to return.
        Default: -1, returns all statuses.
    :param str etag: (optional), the ETag header value from the last time
        you iterated over the statuses.
    :returns: generator of :class:`DeploymentStatus`\ es
    """
    iterator = self._iter(int(number), self.statuses_url,
                          DeploymentStatus, etag=etag)
    # The deployments preview API requires its custom media-type header.
    iterator.headers = Deployment.CUSTOM_HEADERS
    return iterator
|
Iterate over the deployment statuses for this deployment.
:param int number: (optional), the number of statuses to return.
Default: -1, returns all statuses.
:param str etag: (optional), the ETag header value from the last time
you iterated over the statuses.
:returns: generator of :class:`DeploymentStatus`\ es
|
entailment
|
def get_gist(self):
    """Retrieve the gist at this version.

    :returns: :class:`Gist <github3.gists.gist.Gist>`
    """
    # Imported locally to avoid a circular import with the gist module.
    from .gist import Gist
    data = self._json(self._get(self._api), 200)
    return Gist(data, self)
|
Retrieve the gist at this version.
:returns: :class:`Gist <github3.gists.gist.Gist>`
|
entailment
|
def update(self, sha, force=False):
    """Update this reference.

    :param str sha: (required), sha of the reference
    :param bool force: (optional), force the update or not
    :returns: bool
    """
    payload = dumps({'sha': sha, 'force': force})
    data = self._json(self._patch(self._api, data=payload), 200)
    if not data:
        return False
    # Refresh this object with the server's view of the reference.
    self._update_(data)
    return True
|
Update this reference.
:param str sha: (required), sha of the reference
:param bool force: (optional), force the update or not
:returns: bool
|
entailment
|
def recurse(self):
    """Recurse into the tree.

    :returns: :class:`Tree <Tree>`
    """
    response = self._get(self._api, params={'recursive': '1'})
    data = self._json(response, 200)
    if data:
        return Tree(data, self._session)
    return None
|
Recurse into the tree.
:returns: :class:`Tree <Tree>`
|
entailment
|
def format_table(table, column_names=None, column_specs=None, max_col_width=32, auto_col_width=False):
    """
    Table pretty printer. Expects tables to be given as arrays of arrays::

        print(format_table([[1, "2"], [3, "456"]], column_names=['A', 'B']))
    """
    # Remember the caller's column arguments so the auto-width retry below
    # can recurse with the same names/specs.
    orig_col_args = dict(column_names=column_names, column_specs=column_specs)
    # NOTE(review): if ``table`` is empty and both column_specs and
    # column_names are None, col_widths is never bound and the joins below
    # raise UnboundLocalError -- confirm callers never hit this combination.
    if len(table) > 0:
        col_widths = [0] * len(table[0])
    elif column_specs is not None:
        col_widths = [0] * (len(column_specs) + 1)
    elif column_names is not None:
        col_widths = [0] * len(column_names)
    my_col_names, id_column = [], None
    # column_specs implies a leading synthetic "Row" column.
    if column_specs is not None:
        column_names = ["Row"]
        column_names.extend([col["name"] for col in column_specs])
        column_specs = [{"name": "Row", "type": "float"}] + column_specs
    if column_names is not None:
        for i in range(len(column_names)):
            # The "id" column (and column 0) are exempt from truncation.
            if column_names[i].lower() == "id":
                id_column = i
            my_col = ansi_truncate(str(column_names[i]), max_col_width if i not in {0, id_column} else 99)
            my_col_names.append(my_col)
            # Widths are measured on the text with ANSI escapes removed.
            col_widths[i] = max(col_widths[i], len(strip_ansi_codes(my_col)))
    # Truncate every cell and accumulate the final column widths.
    trunc_table = []
    for row in table:
        my_row = []
        for i in range(len(row)):
            my_item = ansi_truncate(str(row[i]), max_col_width if i not in {0, id_column} else 99)
            my_row.append(my_item)
            col_widths[i] = max(col_widths[i], len(strip_ansi_codes(my_item)))
        trunc_table.append(my_row)
    # Header colors keyed by declared column type.
    type_colormap = {"boolean": BLUE(),
                     "integer": YELLOW(),
                     "float": WHITE(),
                     "string": GREEN()}
    for i in "uint8", "int16", "uint16", "int32", "uint32", "int64":
        type_colormap[i] = type_colormap["integer"]
    type_colormap["double"] = type_colormap["float"]

    def col_head(i):
        # Render a column header, colored by type when specs are given.
        if column_specs is not None:
            return BOLD() + type_colormap[column_specs[i]["type"]] + column_names[i] + ENDC()
        else:
            return BOLD() + WHITE() + column_names[i] + ENDC()
    # Assemble the framed table: top border, optional header row + divider,
    # one line per data row, bottom border.
    formatted_table = [border("β") + border("β¬").join(border("β") * i for i in col_widths) + border("β")]
    if len(my_col_names) > 0:
        padded_column_names = [col_head(i) + " " * (col_widths[i] - len(my_col_names[i]))
                               for i in range(len(my_col_names))]
        formatted_table.append(border("β") + border("β").join(padded_column_names) + border("β"))
        formatted_table.append(border("β") + border("βΌ").join(border("β") * i for i in col_widths) + border("β€"))
    for row in trunc_table:
        padded_row = [row[i] + " " * (col_widths[i] - len(strip_ansi_codes(row[i]))) for i in range(len(row))]
        formatted_table.append(border("β") + border("β").join(padded_row) + border("β"))
    formatted_table.append(border("β") + border("β΄").join(border("β") * i for i in col_widths) + border("β"))
    # Optionally shrink columns until the table fits the terminal.
    if auto_col_width:
        if not sys.stdout.isatty():
            raise AegeaException("Cannot auto-format table, output is not a terminal")
        table_width = len(strip_ansi_codes(formatted_table[0]))
        tty_cols, tty_rows = get_terminal_size()
        if table_width > max(tty_cols, 80):
            return format_table(table, max_col_width=max_col_width - 1, auto_col_width=True, **orig_col_args)
    return "\n".join(formatted_table)
|
Table pretty printer. Expects tables to be given as arrays of arrays::
print(format_table([[1, "2"], [3, "456"]], column_names=['A', 'B']))
|
entailment
|
def get(args):
    """Get an Aegea configuration parameter by name"""
    from . import config
    node = config
    # Walk the dot-separated key path down the config object.
    for part in args.key.split("."):
        node = getattr(node, part)
    print(json.dumps(node))
|
Get an Aegea configuration parameter by name
|
entailment
|
def set(args):
    """Set an Aegea configuration parameter to a given value.

    Walks the dot-separated ``args.key`` into the saved configuration,
    creating intermediate mappings on KeyError, then writes the leaf value
    (JSON-decoded when ``args.json`` is set) and saves the file.
    """
    from . import config, tweak
    # Subclass that restricts saving to a single config file
    # (config.config_files[2] -- presumably the user-level file; confirm).
    class ConfigSaver(tweak.Config):
        @property
        def config_files(self):
            return [config.config_files[2]]
    config_saver = ConfigSaver(use_yaml=True, save_on_exit=False)
    c = config_saver
    # Descend to the parent of the leaf key, creating empty dicts as needed.
    for key in args.key.split(".")[:-1]:
        try:
            c = c[key]
        except KeyError:
            c[key] = {}
            c = c[key]
    c[args.key.split(".")[-1]] = json.loads(args.value) if args.json else args.value
    config_saver.save()
|
Set an Aegea configuration parameter to a given value
|
entailment
|
def authorize(login, password, scopes, note='', note_url='', client_id='',
              client_secret='', two_factor_callback=None):
    """Obtain an authorization token for the GitHub API.

    :param str login: (required)
    :param str password: (required)
    :param list scopes: (required), areas you want this token to apply to,
        i.e., 'gist', 'user'
    :param str note: (optional), note about the authorization
    :param str note_url: (optional), url for the application
    :param str client_id: (optional), 20 character OAuth client key for
        which to create a token
    :param str client_secret: (optional), 40 character OAuth client secret
        for which to create the token
    :param func two_factor_callback: (optional), function to call when a
        Two-Factor Authentication code needs to be provided by the user.
    :returns: :class:`Authorization <Authorization>`
    """
    session = GitHub()
    # Register only the 2FA callback here; credentials are passed directly
    # to authorize below.
    session.login(two_factor_callback=two_factor_callback)
    return session.authorize(login, password, scopes, note, note_url,
                             client_id, client_secret)
|
Obtain an authorization token for the GitHub API.
:param str login: (required)
:param str password: (required)
:param list scopes: (required), areas you want this token to apply to,
i.e., 'gist', 'user'
:param str note: (optional), note about the authorization
:param str note_url: (optional), url for the application
:param str client_id: (optional), 20 character OAuth client key for which
to create a token
:param str client_secret: (optional), 40 character OAuth client secret for
which to create the token
:param func two_factor_callback: (optional), function to call when a
Two-Factor Authentication code needs to be provided by the user.
:returns: :class:`Authorization <Authorization>`
|
entailment
|
def login(username=None, password=None, token=None, url=None,
          two_factor_callback=None):
    """Construct and return an authenticated GitHub session.

    This will return a GitHubEnterprise session if a url is provided.

    :param str username: login name
    :param str password: password for the login
    :param str token: OAuth token
    :param str url: (optional), URL of a GitHub Enterprise instance
    :param func two_factor_callback: (optional), function you implement to
        provide the Two Factor Authentication code to GitHub when necessary
    :returns: :class:`GitHub <github3.github.GitHub>`
    """
    # Require either a username/password pair or a token.
    if not ((username and password) or token):
        return None
    session = GitHubEnterprise(url) if url is not None else GitHub()
    session.login(username, password, token, two_factor_callback)
    return session
|
Construct and return an authenticated GitHub session.
This will return a GitHubEnterprise session if a url is provided.
:param str username: login name
:param str password: password for the login
:param str token: OAuth token
:param str url: (optional), URL of a GitHub Enterprise instance
:param func two_factor_callback: (optional), function you implement to
provide the Two Factor Authentication code to GitHub when necessary
:returns: :class:`GitHub <github3.github.GitHub>`
|
entailment
|
def iter_followers(username, number=-1, etag=None):
    """List the followers of ``username``.

    :param str username: (required), login of the person to list the
        followers of
    :param int number: (optional), number of followers to return,
        Default: -1, return all of them
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`User <github3.users.User>`
    """
    if not username:
        return []
    return gh.iter_followers(username, number, etag)
|
List the followers of ``username``.
:param str username: (required), login of the person to list the followers
of
:param int number: (optional), number of followers to return, Default: -1,
return all of them
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`User <github3.users.User>`
|
entailment
|
def iter_following(username, number=-1, etag=None):
    """List the people ``username`` follows.

    :param str username: (required), login of the user
    :param int number: (optional), number of users being followed by
        username to return. Default: -1, return all of them
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`User <github3.users.User>`
    """
    if not username:
        return []
    return gh.iter_following(username, number, etag)
|
List the people ``username`` follows.
:param str username: (required), login of the user
:param int number: (optional), number of users being followed by username
to return. Default: -1, return all of them
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`User <github3.users.User>`
|
entailment
|
def iter_repo_issues(owner, repository, milestone=None, state=None,
                     assignee=None, mentioned=None, labels=None, sort=None,
                     direction=None, since=None, number=-1, etag=None):
    """List issues on owner/repository.

    Only owner and repository are required.

    .. versionchanged:: 0.9.0

        - The ``state`` parameter now accepts 'all' in addition to 'open'
          and 'closed'.

    :param str owner: login of the owner of the repository
    :param str repository: name of the repository
    :param int milestone: None, '*', or ID of milestone
    :param str state: accepted values: ('all', 'open', 'closed')
        api-default: 'open'
    :param str assignee: '*' or login of the user
    :param str mentioned: login of the user
    :param str labels: comma-separated list of label names, e.g.,
        'bug,ui,@high'
    :param str sort: accepted values: ('created', 'updated', 'comments')
        api-default: created
    :param str direction: accepted values: ('asc', 'desc')
        api-default: desc
    :param since: (optional), Only issues after this date will
        be returned. This can be a `datetime` or an ISO8601 formatted
        date string, e.g., 2012-05-20T23:10:27Z
    :type since: datetime or string
    :param int number: (optional), number of issues to return.
        Default: -1 returns all issues
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`Issue <github3.issues.Issue>`\ s
    """
    # Without both required pieces, yield nothing.
    if not (owner and repository):
        return iter([])
    return gh.iter_repo_issues(owner, repository, milestone, state,
                               assignee, mentioned, labels, sort,
                               direction, since, number, etag)
|
List issues on owner/repository. Only owner and repository are
required.
.. versionchanged:: 0.9.0
- The ``state`` parameter now accepts 'all' in addition to 'open'
and 'closed'.
:param str owner: login of the owner of the repository
:param str repository: name of the repository
:param int milestone: None, '*', or ID of milestone
:param str state: accepted values: ('all', 'open', 'closed')
api-default: 'open'
:param str assignee: '*' or login of the user
:param str mentioned: login of the user
:param str labels: comma-separated list of label names, e.g.,
'bug,ui,@high'
:param str sort: accepted values: ('created', 'updated', 'comments')
api-default: created
:param str direction: accepted values: ('asc', 'desc')
api-default: desc
:param since: (optional), Only issues after this date will
be returned. This can be a `datetime` or an ISO8601 formatted
date string, e.g., 2012-05-20T23:10:27Z
:type since: datetime or string
:param int number: (optional), number of issues to return.
Default: -1 returns all issues
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Issue <github3.issues.Issue>`\ s
|
entailment
|
def iter_orgs(username, number=-1, etag=None):
    """List the organizations associated with ``username``.

    :param str username: (required), login of the user
    :param int number: (optional), number of orgs to return. Default: -1,
        return all of the organizations
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of
        :class:`Organization <github3.orgs.Organization>`
    """
    # Falls back to an empty list when no username is given.
    return gh.iter_orgs(username, number, etag) if username else []
|
List the organizations associated with ``username``.
:param str username: (required), login of the user
:param int number: (optional), number of orgs to return. Default: -1,
return all of the issues
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of
:class:`Organization <github3.orgs.Organization>`
|
entailment
|
def iter_user_repos(login, type=None, sort=None, direction=None, number=-1,
                    etag=None):
    """List public repositories for the specified ``login``.

    .. versionadded:: 0.6

    .. note:: This replaces github3.iter_repos

    :param str login: (required)
    :param str type: (optional), accepted values:
        ('all', 'owner', 'member')
        API default: 'all'
    :param str sort: (optional), accepted values:
        ('created', 'updated', 'pushed', 'full_name')
        API default: 'created'
    :param str direction: (optional), accepted values:
        ('asc', 'desc'), API default: 'asc' when using 'full_name',
        'desc' otherwise
    :param int number: (optional), number of repositories to return.
        Default: -1 returns all repositories
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`Repository <github3.repos.Repository>`
        objects
    """
    # ``type`` shadows the builtin; the name is part of the public API.
    if not login:
        return iter([])
    return gh.iter_user_repos(login, type, sort, direction, number, etag)
|
List public repositories for the specified ``login``.
.. versionadded:: 0.6
.. note:: This replaces github3.iter_repos
:param str login: (required)
:param str type: (optional), accepted values:
('all', 'owner', 'member')
API default: 'all'
:param str sort: (optional), accepted values:
('created', 'updated', 'pushed', 'full_name')
API default: 'created'
:param str direction: (optional), accepted values:
('asc', 'desc'), API default: 'asc' when using 'full_name',
'desc' otherwise
:param int number: (optional), number of repositories to return.
Default: -1 returns all repositories
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Repository <github3.repos.Repository>`
objects
|
entailment
|
def markdown(text, mode='', context='', raw=False):
    """Render an arbitrary markdown document via the GitHub API.

    :param str text: (required), the text of the document to render
    :param str mode: (optional), 'markdown' or 'gfm'
    :param str context: (optional), only important when using mode 'gfm',
        this is the repository to use as the context for the rendering
    :param bool raw: (optional), renders a document like a README.md,
        no gfm, no context
    :returns: str -- HTML formatted text
    """
    # Thin module-level wrapper around the anonymous session's method.
    return gh.markdown(text, mode, context, raw)
|
Render an arbitrary markdown document.
:param str text: (required), the text of the document to render
:param str mode: (optional), 'markdown' or 'gfm'
:param str context: (optional), only important when using mode 'gfm',
this is the repository to use as the context for the rendering
:param bool raw: (optional), renders a document like a README.md, no gfm,
no context
:returns: str -- HTML formatted text
|
entailment
|
def search_repositories(query, sort=None, order=None, per_page=None,
                        text_match=False, number=-1, etag=None):
    """Find repositories via various criteria.

    .. warning::

        You will only be able to make 5 calls with this or other search
        functions. To raise the rate-limit on this set of endpoints, create
        an authenticated :class:`GitHub <github3.github.GitHub>` Session
        with ``login``.

    The query can contain any combination of the following supported
    qualifiers:

    - ``in`` Qualifies which fields are searched. With this qualifier you
      can restrict the search to just the repository name, description,
      readme, or any combination of these.
    - ``size`` Finds repositories that match a certain size (in
      kilobytes).
    - ``forks`` Filters repositories based on the number of forks, and/or
      whether forked repositories should be included in the results at
      all.
    - ``created`` or ``pushed`` Filters repositories based on times of
      creation, or when they were last updated. Format: ``YYYY-MM-DD``.
      Examples: ``created:<2011``, ``pushed:<2013-02``,
      ``pushed:>=2013-03-06``
    - ``user`` or ``repo`` Limits searches to a specific user or
      repository.
    - ``language`` Searches repositories based on the language they're
      written in.
    - ``stars`` Searches repositories based on the number of stars.

    For more information about these qualifiers, see: http://git.io/4Z8AkA

    :param str query: (required), a valid query as described above, e.g.,
        ``tetris language:assembly``
    :param str sort: (optional), how the results should be sorted;
        options: ``stars``, ``forks``, ``updated``; default: best match
    :param str order: (optional), the direction of the sorted results,
        options: ``asc``, ``desc``; default: ``desc``
    :param int per_page: (optional)
    :param bool text_match: (optional), if True, return matching search
        terms. See http://git.io/4ct1eQ for more information
    :param int number: (optional), number of repositories to return.
        Default: -1, returns all available repositories
    :param str etag: (optional), previous ETag header value
    :return: generator of :class:`Repository <github3.repos.Repository>`
    """
    return gh.search_repositories(query, sort, order, per_page, text_match,
                                  number, etag)
|
Find repositories via various criteria.
.. warning::
You will only be able to make 5 calls with this or other search
functions. To raise the rate-limit on this set of endpoints, create an
authenticated :class:`GitHub <github3.github.GitHub>` Session with
``login``.
The query can contain any combination of the following supported
qualifiers:
- ``in`` Qualifies which fields are searched. With this qualifier you
can restrict the search to just the repository name, description,
readme, or any combination of these.
- ``size`` Finds repositories that match a certain size (in
kilobytes).
- ``forks`` Filters repositories based on the number of forks, and/or
whether forked repositories should be included in the results at
all.
- ``created`` or ``pushed`` Filters repositories based on times of
creation, or when they were last updated. Format: ``YYYY-MM-DD``.
Examples: ``created:<2011``, ``pushed:<2013-02``,
``pushed:>=2013-03-06``
- ``user`` or ``repo`` Limits searches to a specific user or
repository.
- ``language`` Searches repositories based on the language they're
written in.
- ``stars`` Searches repositories based on the number of stars.
For more information about these qualifiers, see: http://git.io/4Z8AkA
:param str query: (required), a valid query as described above, e.g.,
``tetris language:assembly``
:param str sort: (optional), how the results should be sorted;
options: ``stars``, ``forks``, ``updated``; default: best match
:param str order: (optional), the direction of the sorted results,
options: ``asc``, ``desc``; default: ``desc``
:param int per_page: (optional)
:param bool text_match: (optional), if True, return matching search
terms. See http://git.io/4ct1eQ for more information
:param int number: (optional), number of repositories to return.
Default: -1, returns all available repositories
:param str etag: (optional), previous ETag header value
:return: generator of :class:`Repository <github3.repos.Repository>`
|
entailment
|
def limits(args):
    """
    Describe limits in effect on your AWS account. See also https://console.aws.amazon.com/ec2/v2/home#Limits:
    """
    # https://aws.amazon.com/about-aws/whats-new/2014/06/19/amazon-ec2-service-limits-report-now-available/
    # Console-only APIs: getInstanceLimits, getAccountLimits, getAutoscalingLimits, getHostLimits
    # http://boto3.readthedocs.io/en/latest/reference/services/dynamodb.html#DynamoDB.Client.describe_limits
    attribute_names = ["max-instances", "vpc-max-security-groups-per-interface", "vpc-max-elastic-ips"]
    response = clients.ec2.describe_account_attributes(AttributeNames=attribute_names)
    page_output(tabulate(response["AccountAttributes"], args))
|
Describe limits in effect on your AWS account. See also https://console.aws.amazon.com/ec2/v2/home#Limits:
|
entailment
|
def add_labels(self, *args):
    """Add labels to this issue.

    :param str args: (required), names of the labels you wish to add
    :returns: list of :class:`Label`\ s
    """
    url = self._build_url('labels', base_url=self._api)
    data = self._json(self._post(url, data=args), 200)
    if not data:
        return []
    return [Label(label, self) for label in data]
|
Add labels to this issue.
:param str args: (required), names of the labels you wish to add
:returns: list of :class:`Label`\ s
|
entailment
|
def assign(self, login):
    """Assign user ``login`` to this issue.

    This is a short cut for ``issue.edit``.

    :param str login: username of the person to assign this issue to
    :returns: bool
    """
    if not login:
        return False
    milestone_number = self.milestone.number if self.milestone else None
    current_labels = [str(label) for label in self.labels]
    # Re-submit the issue's current state with only the assignee changed.
    return self.edit(self.title, self.body, login, self.state,
                     milestone_number, current_labels)
|
Assigns user ``login`` to this issue. This is a short cut for
``issue.edit``.
:param str login: username of the person to assign this issue to
:returns: bool
|
entailment
|
def comment(self, id_num):
    """Get a single comment by its id.

    The catch here is that id is NOT a simple number to obtain. If
    you were to look at the comments on issue #15 in
    sigmavirus24/Todo.txt-python, the first comment's id is 4150787.

    :param int id_num: (required), comment id, see example above
    :returns: :class:`IssueComment <github3.issues.comment.IssueComment>`
    """
    # Reject non-positive ids up front.
    if int(id_num) <= 0:
        return None
    owner, repo = self.repository
    url = self._build_url('repos', owner, repo, 'issues', 'comments',
                          str(id_num))
    data = self._json(self._get(url), 200)
    return IssueComment(data) if data else None
|
Get a single comment by its id.
The catch here is that id is NOT a simple number to obtain. If
you were to look at the comments on issue #15 in
sigmavirus24/Todo.txt-python, the first comment's id is 4150787.
:param int id_num: (required), comment id, see example above
:returns: :class:`IssueComment <github3.issues.comment.IssueComment>`
|
entailment
|
def create_comment(self, body):
    """Create a comment on this issue.

    :param str body: (required), comment body
    :returns: :class:`IssueComment <github3.issues.comment.IssueComment>`
    """
    if not body:
        return None
    url = self._build_url('comments', base_url=self._api)
    data = self._json(self._post(url, data={'body': body}), 201)
    return IssueComment(data, self) if data else None
|
Create a comment on this issue.
:param str body: (required), comment body
:returns: :class:`IssueComment <github3.issues.comment.IssueComment>`
|
entailment
|
def edit(self, title=None, body=None, assignee=None, state=None,
         milestone=None, labels=None):
    """Edit this issue.

    :param str title: Title of the issue
    :param str body: markdown formatted body (description) of the issue
    :param str assignee: login name of user the issue should be assigned
        to
    :param str state: accepted values: ('open', 'closed')
    :param int milestone: the NUMBER (not title) of the milestone to
        assign this to [1]_, or 0 to remove the milestone
    :param list labels: list of labels to apply this to
    :returns: bool

    .. [1] Milestone numbering starts at 1, i.e. the first milestone you
        create is 1, the second is 2, etc.
    """
    payload = {'title': title, 'body': body, 'assignee': assignee,
               'state': state, 'milestone': milestone, 'labels': labels}
    self._remove_none(payload)
    if not payload:
        return False
    # Milestone 0 means "remove the milestone"; the API expects null.
    if payload.get('milestone') == 0:
        payload['milestone'] = None
    data = self._json(self._patch(self._api, data=dumps(payload)), 200)
    if data:
        self._update_(data)
        return True
    return False
|
Edit this issue.
:param str title: Title of the issue
:param str body: markdown formatted body (description) of the issue
:param str assignee: login name of user the issue should be assigned
to
:param str state: accepted values: ('open', 'closed')
:param int milestone: the NUMBER (not title) of the milestone to
assign this to [1]_, or 0 to remove the milestone
:param list labels: list of labels to apply this to
:returns: bool
.. [1] Milestone numbering starts at 1, i.e. the first milestone you
create is 1, the second is 2, etc.
|
entailment
|
def iter_comments(self, number=-1):
    """Iterate over the comments on this issue.

    :param int number: (optional), number of comments to iterate over
    :returns: iterator of
        :class:`IssueComment <github3.issues.comment.IssueComment>`\ s
    """
    comments_url = self._build_url('comments', base_url=self._api)
    return self._iter(int(number), comments_url, IssueComment)
|
Iterate over the comments on this issue.
:param int number: (optional), number of comments to iterate over
:returns: iterator of
:class:`IssueComment <github3.issues.comment.IssueComment>`\ s
|
entailment
|
def iter_events(self, number=-1):
    """Iterate over events associated with this issue only.

    :param int number: (optional), number of events to return. Default: -1
        returns all events available.
    :returns: generator of
        :class:`IssueEvent <github3.issues.event.IssueEvent>`\ s
    """
    events_url = self._build_url('events', base_url=self._api)
    return self._iter(int(number), events_url, IssueEvent)
|
Iterate over events associated with this issue only.
:param int number: (optional), number of events to return. Default: -1
returns all events available.
:returns: generator of
:class:`IssueEvent <github3.issues.event.IssueEvent>`\ s
|
entailment
|
def remove_label(self, name):
    """Remove the label ``name`` from this issue.

    :param str name: (required), name of the label to remove
    :returns: bool
    """
    label_url = self._build_url('labels', name, base_url=self._api)
    # The API docs describe a list-of-strings response, but in practice
    # the endpoint answers with a bare 204 (or 404), so treat the call
    # as a boolean success/failure.
    return self._boolean(self._delete(label_url), 204, 404)
|
Removes label ``name`` from this issue.
:param str name: (required), name of the label to remove
:returns: bool
|
entailment
|
def replace_labels(self, labels):
    """Replace all labels on this issue with ``labels``.

    :param list labels: label names

    :returns: list of :class:`Label`\ s created from the response, or an
        empty list on failure.  (Despite older docs saying ``bool``, the
        code below always returns a list.)
    """
    url = self._build_url('labels', base_url=self._api)
    json = self._json(self._put(url, data=dumps(labels)), 200)
    return [Label(l, self) for l in json] if json else []
|
Replace all labels on this issue with ``labels``.
:param list labels: label names
:returns: bool
|
entailment
|
def reopen(self):
    """Re-open a closed issue by re-submitting its current data with
    state set to ``'open'``.

    :returns: bool
    """
    login = self.assignee.login if self.assignee else ''
    milestone_number = self.milestone.number if self.milestone else None
    label_names = [str(label) for label in self.labels]
    return self.edit(self.title, self.body, login, 'open',
                     milestone_number, label_names)
|
Re-open a closed issue.
:returns: bool
|
entailment
|
def _strptime(self, time_str):
    """Convert an ISO 8601 formatted string in UTC into a
    timezone-aware datetime object; returns None for empty input."""
    if not time_str:
        return None
    # The string parses to a naive datetime; attach the UTC tzinfo.
    parsed = datetime.strptime(time_str, __timeformat__)
    return parsed.replace(tzinfo=UTC())
|
Convert an ISO 8601 formatted string in UTC into a
timezone-aware datetime object.
|
entailment
|
def _iter(self, count, url, cls, params=None, etag=None):
    """Build a generic iterator over a paginated endpoint.

    :param int count: How many items to return.
    :param int url: First URL to start with
    :param class cls: cls to return an object of
    :param params dict: (optional) Parameters for the request
    :param str etag: (optional), ETag from the last call
    """
    # Imported lazily; importing at module level would be circular.
    from .structs import GitHubIterator
    iterator = GitHubIterator(count, url, cls, self, params, etag)
    return iterator
|
Generic iterator for this project.
:param int count: How many items to return.
:param int url: First URL to start with
:param class cls: cls to return an object of
:param params dict: (optional) Parameters for the request
:param str etag: (optional), ETag from the last call
|
entailment
|
def ratelimit_remaining(self):
    """Number of requests before GitHub imposes a ratelimit.

    :returns: int
    """
    response = self._get(self._github_url + '/rate_limit')
    rates = self._json(response, 200)
    # Drill into resources -> core -> remaining, defaulting to 0.
    self._remaining = rates.get('resources', {}).get('core', {}).get(
        'remaining', 0)
    return self._remaining
|
Number of requests before GitHub imposes a ratelimit.
:returns: int
|
entailment
|
def refresh(self, conditional=False):
    """Re-retrieve the information for this object and return the
    refreshed instance.

    :param bool conditional: If True, then we will search for a stored
        header ('Last-Modified', or 'ETag') on the object and send that
        as described in the `Conditional Requests`_ section of the docs
    :returns: self

    Returning ``self`` allows fluent use, e.g.::

        repos = [r.refresh() for r in g.iter_repos('kennethreitz')]

    .. versionchanged:: 0.5

    .. _Conditional Requests:
        http://developer.github.com/v3/#conditional-requests
    """
    headers = None
    if conditional:
        # Prefer Last-Modified; fall back to the ETag if absent.
        if self.last_modified:
            headers = {'If-Modified-Since': self.last_modified}
        elif self.etag:
            headers = {'If-None-Match': self.etag}
    json = self._json(self._get(self._api, headers=headers), 200)
    if json is not None:
        # Re-run __init__ so every attribute reflects the new payload.
        self.__init__(json, self._session)
    return self
|
Re-retrieve the information for this object and returns the
refreshed instance.
:param bool conditional: If True, then we will search for a stored
header ('Last-Modified', or 'ETag') on the object and send that
as described in the `Conditional Requests`_ section of the docs
:returns: self
The reasoning for the return value is the following example: ::
repos = [r.refresh() for r in g.iter_repos('kennethreitz')]
Without the return value, that would be an array of ``None``'s and you
would otherwise have to do: ::
repos = [r for i in g.iter_repos('kennethreitz')]
[r.refresh() for r in repos]
Which is really an anti-pattern.
.. versionchanged:: 0.5
.. _Conditional Requests:
http://developer.github.com/v3/#conditional-requests
|
entailment
|
def edit(self, body):
    """Edit this comment.

    :param str body: (required), new body of the comment, Markdown
        formatted
    :returns: bool
    """
    if not body:
        return False
    payload = dumps({'body': body})
    json = self._json(self._patch(self._api, data=payload), 200)
    if not json:
        return False
    self._update_(json)
    return True
|
Edit this comment.
:param str body: (required), new body of the comment, Markdown
formatted
:returns: bool
|
entailment
|
def toplot(ts,
           filename=None,
           grid=True,
           legend=True,
           pargs=(),
           **kwargs):
    '''To plot formatter: draw the timeseries *ts* and return the
    ``plt`` module for further customisation.'''
    fig = plt.figure()
    axes = fig.add_subplot(111)
    axes.plot(list(ts.dates()), ts.values(), *pargs)
    axes.grid(grid)
    # Slant the x tick labels and reclaim bottom margin for them.
    fig.autofmt_xdate()
    # Series names are encoded in ts.name, '__'-separated.
    names = ts.name.split('__')
    if len(names) == 1:
        # Single series: use its name as the title instead of a legend.
        axes.set_title(names[0],
                       fontweight=kwargs.get('title_fontweight', 'bold'))
    elif legend:
        # Multiple series: show a legend instead of a title.
        axes.legend(names,
                    loc=kwargs.get('legend_location', 'best'),
                    ncol=kwargs.get('legend_ncol', 2))
    return plt
|
To plot formatter
|
entailment
|
def check_table(table):
    """
    Ensure the table is valid for converting to grid table.

    * The table must be a list of lists
    * Each row must contain the same number of columns
    * The table must not be empty

    Parameters
    ----------
    table : list of lists of str
        The list of rows of strings to convert to a grid table

    Returns
    -------
    message : str
        If no problems are found, this message is empty, otherwise it
        tries to describe the problem that was found.
    """
    if not isinstance(table, list):
        return "Table must be a list of lists"
    if not table:
        return "Table must contain at least one row and one column"
    width = len(table[0])
    for row in table:
        if not isinstance(row, list):
            return "Table must be a list of lists"
        # BUG FIX: the original built this message string but never
        # returned it, so ragged tables passed validation.
        if len(row) != width:
            return "Each row must have the same number of columns"
    return ""
|
Ensure the table is valid for converting to grid table.
* The table must a list of lists
* Each row must contain the same number of columns
* The table must not be empty
Parameters
----------
table : list of lists of str
The list of rows of strings to convert to a grid table
Returns
-------
message : str
If no problems are found, this message is empty, otherwise it
tries to describe the problem that was found.
|
entailment
|
def get_span(spans, row, column):
    """
    Gets the span containing the [row, column] pair.

    Parameters
    ----------
    spans : list of lists of lists
        A list containing spans, which are lists of [row, column] pairs
        that define where a span is inside a table.

    Returns
    -------
    span : list of lists
        The span containing the [row, column] pair, or None when no
        span contains it.
    """
    target = [row, column]
    for span in spans:
        if target in span:
            return span
    return None
|
Gets the span containing the [row, column] pair
Parameters
----------
spans : list of lists of lists
A list containing spans, which are lists of [row, column] pairs
that define where a span is inside a table.
Returns
-------
span : list of lists
A span containing the [row, column] pair
|
entailment
|
def find_unassigned_table_cell(table):
    """
    Search through a table and return the first [row, column] pair
    whose value is None.

    When no cell is None, the indices of the last visited cell are
    returned instead (matching the historical behaviour).

    Parameters
    ----------
    table : list of lists of str

    Returns
    -------
    tuple of int
        The (row, column) pair of the first None cell.
    """
    for r, cells in enumerate(table):
        for c, cell in enumerate(cells):
            if cell is None:
                return r, c
    # No unassigned cell: fall through with the last visited indices.
    return r, c
|
Search through a table and return the first [row, column] pair
who's value is None.
Parameters
----------
table : list of lists of str
Returns
-------
list of int
The row column pair of the None type cell
|
entailment
|
def insert(self, dte, values):
    '''insert *values* at date *dte*.

    ``dte`` is normalised through ``self.dateconvert``; empty ``values``
    is a no-op.  Rows are kept sorted by date via the ``_skl`` skiplist.
    '''
    if len(values):
        dte = self.dateconvert(dte)
        if not self:
            # First entry: seed the parallel date/data arrays.
            self._date = np.array([dte])
            self._data = np.array([values])
        else:
            # search for the date
            index = self._skl.rank(dte)
            if index < 0:
                # date not available: rank encodes the insertion point
                # as -1-index, so decode it and grow both arrays by one.
                N = len(self._data)
                index = -1-index
                self._date.resize((N+1,))
                self._data.resize((N+1, self.count()))
                if index < N:
                    # Shift the tail one slot right to open a gap.
                    self._date[index+1:] = self._date[index:-1]
                    self._data[index+1:] = self._data[index:-1]
            self._date[index] = dte
            self._data[index] = values
            # NOTE(review): _skl.insert runs even when the date already
            # existed (index >= 0) — presumably the skiplist tolerates
            # duplicates; confirm against the skiplist implementation.
            self._skl.insert(dte)
|
insert *values* at date *dte*.
|
entailment
|
def _translate_nodes(root, *nodes):
    """
    Convert node names into node instances.  Non-string arguments must
    already be nodes of the same class as ``root``.  Returns a single
    node when exactly one target was given, otherwise a list.
    """
    name2node = {n: None for n in nodes if type(n) is str}
    for candidate in root.traverse():
        if candidate.name in name2node:
            if name2node[candidate.name] is not None:
                raise TreeError("Ambiguous node name: {}".format(str(candidate.name)))
            name2node[candidate.name] = candidate
    missing = [name for name, found in name2node.items() if found is None]
    if missing:
        raise ValueError("Node names not found: "+str(missing))
    valid_nodes = []
    for n in nodes:
        if type(n) is not str:
            if type(n) is not root.__class__:
                raise TreeError("Invalid target node: "+str(n))
            valid_nodes.append(n)
    valid_nodes.extend(name2node.values())
    return valid_nodes[0] if len(valid_nodes) == 1 else valid_nodes
|
Convert node names into node instances...
|
entailment
|
def add_feature(self, pr_name, pr_value):
    """Attach ``pr_value`` to this node under the feature name
    ``pr_name``, registering the name in ``self.features``."""
    setattr(self, pr_name, pr_value)
    self.features.add(pr_name)
|
Add or update a node's feature.
|
entailment
|
def add_features(self, **features):
    """Add or update several features at once from keyword arguments."""
    for feature_name, feature_value in features.items():
        setattr(self, feature_name, feature_value)
        self.features.add(feature_name)
|
Add or update several features.
|
entailment
|
def del_feature(self, pr_name):
    """Permanently deletes a node's feature; silently ignores names
    that are not set on the node."""
    if hasattr(self, pr_name):
        delattr(self, pr_name)
        self.features.remove(pr_name)
|
Permanently deletes a node's feature.
|
entailment
|
def add_child(self, child=None, name=None, dist=None, support=None):
    """
    Adds a new child to this node. If child node is not supplied
    as an argument, a new node instance will be created.

    Parameters
    ----------
    child:
        the node instance to be added as a child.
    name:
        the name that will be given to the child.
    dist:
        the distance from the node to the child.
    support:
        the support value of child partition.

    Returns:
    --------
    The child node instance
    """
    new_child = self.__class__() if child is None else child
    if name is not None:
        new_child.name = name
    if dist is not None:
        new_child.dist = dist
    if support is not None:
        new_child.support = support
    self.children.append(new_child)
    new_child.up = self
    return new_child
|
Adds a new child to this node. If child node is not suplied
as an argument, a new node instance will be created.
Parameters
----------
child:
the node instance to be added as a child.
name:
the name that will be given to the child.
dist:
the distance from the node to the child.
support':
the support value of child partition.
Returns:
--------
The child node instance
|
entailment
|
def remove_child(self, child):
    """
    Removes a child from this node (parent and child
    nodes still exist but are no longer connected).
    """
    try:
        self.children.remove(child)
    except ValueError:
        raise TreeError("child not found")
    child.up = None
    return child
|
Removes a child from this node (parent and child
nodes still exit but are no longer connected).
|
entailment
|
def add_sister(self, sister=None, name=None, dist=None):
    """
    Adds a sister to this node by delegating to the parent's
    ``add_child``. If sister node is not supplied as an argument,
    a new TreeNode instance will be created and returned.
    """
    if self.up is None:
        raise TreeError("A parent node is required to add a sister")
    return self.up.add_child(child=sister, name=name, dist=dist)
|
Adds a sister to this node. If sister node is not supplied
as an argument, a new TreeNode instance will be created and
returned.
|
entailment
|
def remove_sister(self, sister=None):
    """
    Removes a sister node. It has the same effect as
    **`TreeNode.up.remove_child(sister)`**

    If a sister node is not supplied, the first sister will be deleted.

    :argument sister: A node instance
    :return: The node removed, or None when this node has no sisters
    """
    sisters = self.get_sisters()
    if not sisters:
        return None
    target = sister if sister is not None else sisters[0]
    return self.up.remove_child(target)
|
Removes a sister node. It has the same effect as
**`TreeNode.up.remove_child(sister)`**
If a sister node is not supplied, the first sister will be deleted
and returned.
:argument sister: A node instance
:return: The node removed
|
entailment
|
def delete(self, prevent_nondicotomic=True, preserve_branch_length=False):
    """
    Deletes node from the tree structure. Notice that this method
    makes 'disappear' the node from the tree structure. This means
    that children from the deleted node are transferred to the
    next available parent.

    Parameters:
    -----------
    prevent_nondicotomic:
        When True (default), delete will be executed recursively to
        prevent single-child nodes.
    preserve_branch_length:
        If True, branch lengths of the deleted nodes are transferred
        (summed up) to its parent's branch, thus keeping original
        distances among nodes.

    **Example:** deleting H in ``root -> (C, H -> (B, A))`` yields
    ``root -> (C, B, A)``.
    """
    parent = self.up
    if parent:
        if preserve_branch_length:
            # Single child: push this branch length down to the child;
            # multiple children: push it up to the parent instead.
            if len(self.children) == 1:
                self.children[0].dist += self.dist
            elif len(self.children) > 1:
                parent.dist += self.dist
        # Re-attach every child to the grandparent, then unlink self.
        for ch in self.children:
            parent.add_child(ch)
        parent.remove_child(self)
    # Avoids parents with only one child
    if prevent_nondicotomic and parent and\
       len(parent.children) < 2:
        parent.delete(prevent_nondicotomic=False,
                      preserve_branch_length=preserve_branch_length)
|
Deletes node from the tree structure. Notice that this method
makes 'disappear' the node from the tree structure. This means
that children from the deleted node are transferred to the
next available parent.
Parameters:
-----------
prevent_nondicotomic:
When True (default), delete
function will be execute recursively to prevent single-child
nodes.
preserve_branch_length:
If True, branch lengths of the deleted nodes are transferred
(summed up) to its parent's branch, thus keeping original
distances among nodes.
**Example:**
/ C
root-|
| / B
\--- H |
\ A
> H.delete() will produce this structure:
/ C
|
root-|--B
|
\ A
|
entailment
|
def detach(self):
    """
    Detaches this node (and all its descendants) from its parent
    and returns a reference to itself.

    A detached node conserves its whole structure of descendants and
    can be attached to another node through 'add_child' — effectively
    a cut and paste.
    """
    parent = self.up
    if parent:
        parent.children.remove(self)
    self.up = None
    return self
|
Detachs this node (and all its descendants) from its parent
and returns the referent to itself.
Detached node conserves all its structure of descendants, and can
be attached to another node through the 'add_child' function. This
mechanism can be seen as a cut and paste.
|
entailment
|
def prune(self, nodes, preserve_branch_length=False):
    """
    Prunes the topology of a node to conserve only a selected list of
    leaf or internal nodes. The minimum number of nodes that conserve
    the topological relationships among the requested nodes will be
    retained. Root node is always conserved.

    Parameters:
    -----------
    nodes:
        a list of node names or node objects that should be retained

    preserve_branch_length:
        If True, branch lengths of the deleted nodes are transferred
        (summed up) to its parent's branch, thus keeping original
        distances among nodes.

    **Example:**
    t1 = Tree('(((((A,B)C)D,E)F,G)H,(I,J)K)root;', format=1)
    t1.prune(['A', 'B'])        # keeps root -> (A, B)
    t1.prune(['A', 'B', 'C'])   # keeps root -> C -> (A, B)
    """
    def cmp_nodes(x, y):
        # if several nodes are in the same path of two kept nodes,
        # only one should be maintained. This prioritize internal
        # nodes that are already in the to_keep list and then
        # deeper nodes (closer to the leaves).
        if n2depth[x] > n2depth[y]:
            return -1
        elif n2depth[x] < n2depth[y]:
            return 1
        else:
            return 0
    to_keep = set(_translate_nodes(self, *nodes))
    start, node2path = self.get_common_ancestor(to_keep, get_path=True)
    to_keep.add(self)

    # Calculate which kept nodes are visiting the same nodes in
    # their path to the common ancestor.
    n2count = {}
    n2depth = {}
    for seed, path in six.iteritems(node2path):
        for visited_node in path:
            if visited_node not in n2depth:
                depth = visited_node.get_distance(start, topology_only=True)
                n2depth[visited_node] = depth
            if visited_node is not seed:
                n2count.setdefault(visited_node, set()).add(seed)

    # if several internal nodes are in the path of exactly the same kept
    # nodes, only one (the deepest) should be maintain.
    visitors2nodes = {}
    for node, visitors in six.iteritems(n2count):
        # keep nodes connection at least two other nodes
        if len(visitors)>1:
            visitor_key = frozenset(visitors)
            visitors2nodes.setdefault(visitor_key, set()).add(node)

    for visitors, nodes in six.iteritems(visitors2nodes):
        if not (to_keep & nodes):
            # None of this group is already kept: keep only the deepest.
            sorted_nodes = sorted(nodes, key=cmp_to_key(cmp_nodes))
            to_keep.add(sorted_nodes[0])

    # Postorder so children are removed before their parents.
    for n in self.get_descendants('postorder'):
        if n not in to_keep:
            if preserve_branch_length:
                if len(n.children) == 1:
                    n.children[0].dist += n.dist
                elif len(n.children) > 1 and n.up:
                    n.up.dist += n.dist
            n.delete(prevent_nondicotomic=False)
|
Prunes the topology of a node to conserve only a selected list of leaf
internal nodes. The minimum number of nodes that conserve the
topological relationships among the requested nodes will be
retained. Root node is always conserved.
Parameters:
-----------
nodes:
a list of node names or node objects that should be retained
preserve_branch_length:
If True, branch lengths of the deleted nodes are transferred
(summed up) to its parent's branch, thus keeping original distances
among nodes.
**Examples:**
t1 = Tree('(((((A,B)C)D,E)F,G)H,(I,J)K)root;', format=1)
t1.prune(['A', 'B'])
# /-A
# /D /C|
# /F| \-B
# | |
# /H| \-E
# | | /-A
#-root \-G -root
# | \-B
# | /-I
# \K|
# \-J
t1 = Tree('(((((A,B)C)D,E)F,G)H,(I,J)K)root;', format=1)
t1.prune(['A', 'B', 'C'])
# /-A
# /D /C|
# /F| \-B
# | |
# /H| \-E
# | | /-A
#-root \-G -root- C|
# | \-B
# | /-I
# \K|
# \-J
t1 = Tree('(((((A,B)C)D,E)F,G)H,(I,J)K)root;', format=1)
t1.prune(['A', 'B', 'I'])
# /-A
# /D /C|
# /F| \-B
# | |
# /H| \-E /-I
# | | -root
#-root \-G | /-A
# | \C|
# | /-I \-B
# \K|
# \-J
t1 = Tree('(((((A,B)C)D,E)F,G)H,(I,J)K)root;', format=1)
t1.prune(['A', 'B', 'F', 'H'])
# /-A
# /D /C|
# /F| \-B
# | |
# /H| \-E
# | | /-A
#-root \-G -root-H /F|
# | \-B
# | /-I
# \K|
# \-J
|
entailment
|
def get_sisters(self):
    """Return a new, independent list of this node's sister nodes
    (the parent's other children); empty for a root node."""
    parent = self.up
    if parent is None:
        return []
    return [sibling for sibling in parent.children if sibling != self]
|
Returns an indepent list of sister nodes.
|
entailment
|
def iter_leaves(self, is_leaf_fn=None):
    """Yield every leaf found under this node (preorder).  When
    ``is_leaf_fn`` is given it decides leafness instead of
    ``node.is_leaf()``."""
    leaf_test = is_leaf_fn if is_leaf_fn else (lambda node: node.is_leaf())
    for node in self.traverse(strategy="preorder", is_leaf_fn=is_leaf_fn):
        if leaf_test(node):
            yield node
|
Returns an iterator over the leaves under this node.
|
entailment
|
def iter_leaf_names(self, is_leaf_fn=None):
    """Yield the ``name`` of every leaf under this node."""
    for leaf in self.iter_leaves(is_leaf_fn=is_leaf_fn):
        yield leaf.name
|
Returns an iterator over the leaf names under this node.
|
entailment
|
def iter_descendants(self, strategy="levelorder", is_leaf_fn=None):
    """Yield every descendant node, excluding this node itself."""
    for node in self.traverse(strategy=strategy, is_leaf_fn=is_leaf_fn):
        if node is not self:
            yield node
|
Returns an iterator over all descendant nodes.
|
entailment
|
def get_descendants(self, strategy="levelorder", is_leaf_fn=None):
    """Return a list of all (leaves and internal) descendant nodes."""
    return list(self.iter_descendants(strategy=strategy,
                                      is_leaf_fn=is_leaf_fn))
|
Returns a list of all (leaves and internal) descendant nodes.
|
entailment
|
def traverse(self, strategy="levelorder", is_leaf_fn=None):
    """Return an iterator to traverse the tree under this node.

    Parameters:
    -----------
    strategy:
        one of "preorder" (parent before children), "postorder"
        (children before parent) or "levelorder" (root to leaves).
        Any other value yields None, as in the original implementation.
    is_leaf_fn:
        optional callable receiving a node and returning True/False;
        nodes for which it returns True are treated as terminal, which
        dynamically collapses their subtrees during traversal.
    """
    walkers = {
        "preorder": self._iter_descendants_preorder,
        "levelorder": self._iter_descendants_levelorder,
        "postorder": self._iter_descendants_postorder,
    }
    walker = walkers.get(strategy)
    if walker is not None:
        return walker(is_leaf_fn=is_leaf_fn)
|
Returns an iterator to traverse tree under this node.
Parameters:
-----------
strategy:
set the way in which tree will be traversed. Possible
values are: "preorder" (first parent and then children)
'postorder' (first children and the parent) and
"levelorder" (nodes are visited in order from root to leaves)
is_leaf_fn:
If supplied, ``is_leaf_fn`` function will be used to
interrogate nodes about if they are terminal or internal.
``is_leaf_fn`` function should receive a node instance as first
argument and return True or False. Use this argument to
traverse a tree by dynamically collapsing internal nodes matching
``is_leaf_fn``.
|
entailment
|
def iter_prepostorder(self, is_leaf_fn=None):
    """
    Iterate over all nodes in a tree yielding every node in both
    pre and post order. Each iteration returns a postorder flag
    (True if node is being visited in postorder) and a node
    instance.
    """
    to_visit = [self]
    if is_leaf_fn is not None:
        _leaf = is_leaf_fn
    else:
        # Unbound method; invoked below as _leaf(node).
        _leaf = self.__class__.is_leaf

    while to_visit:
        node = to_visit.pop(-1)
        try:
            # Internal nodes are re-pushed as [1, node] markers; hitting
            # one here means all of its children were already visited.
            node = node[1]
        except TypeError:
            # PREORDER ACTIONS
            yield (False, node)
            if not _leaf(node):
                # ADD CHILDREN
                # The [1, node] marker goes on the stack first so the
                # node resurfaces (postorder) after its children.
                to_visit.extend(reversed(node.children + [[1, node]]))
        else:
            #POSTORDER ACTIONS
            yield (True, node)
|
Iterate over all nodes in a tree yielding every node in both
pre and post order. Each iteration returns a postorder flag
(True if node is being visited in postorder) and a node
instance.
|
entailment
|
def _iter_descendants_levelorder(self, is_leaf_fn=None):
    """Yield this node and every descendant, breadth-first."""
    queue = deque([self])
    while queue:
        node = queue.popleft()
        yield node
        # Do not descend below nodes the caller considers leaves.
        if not is_leaf_fn or not is_leaf_fn(node):
            queue.extend(node.children)
|
Iterate over all desdecendant nodes.
|
entailment
|
def _iter_descendants_preorder(self, is_leaf_fn=None):
    """Yield this node and every descendant in preorder (each parent
    before its children).

    :param is_leaf_fn: optional callable; when it returns True for a
        node, that node's children are not visited.
    """
    to_visit = deque()
    node = self
    while node is not None:
        yield node
        if not is_leaf_fn or not is_leaf_fn(node):
            # Push children to the left, preserving their order.
            to_visit.extendleft(reversed(node.children))
        try:
            node = to_visit.popleft()
        except IndexError:
            # Queue exhausted: traversal complete.  (BUG FIX: the
            # original bare ``except:`` also swallowed unrelated errors
            # such as KeyboardInterrupt.)
            node = None
|
Iterator over all descendant nodes.
|
entailment
|
def iter_ancestors(self):
    """
    Yield each ancestor node, walking from this node's parent up to
    the tree root.
    """
    ancestor = self.up
    while ancestor is not None:
        yield ancestor
        ancestor = ancestor.up
|
Iterates over the list of all ancestor nodes from
current node to the current tree root.
|
entailment
|
def write(self,
          features=None,
          outfile=None,
          format=0,
          is_leaf_fn=None,
          format_root_node=False,
          dist_formatter=None,
          support_formatter=None,
          name_formatter=None):
    """
    Returns the newick representation of current node. Several
    arguments control the way in which extra data is shown for
    every node:

    Parameters:
    -----------
    features:
        a list of feature names to be exported using the Extended Newick
        Format (i.e. features=["name", "dist"]). Use an empty list to
        export all available features in each node (features=[])
    outfile:
        writes the output to a given file and returns None; otherwise
        the newick string itself is returned
    format:
        defines the newick standard used to encode the tree.
    format_root_node:
        If True, it allows features and branch information from root node
        to be exported as a part of the newick text string. For newick
        compatibility reasons, this is False by default.
    is_leaf_fn:
        See :func:`TreeNode.traverse` for documentation.

    **Example:**
    t.get_newick(features=["species","name"], format=1)
    """
    newick = write_newick(self, features=features,
                          format=format,
                          is_leaf_fn=is_leaf_fn,
                          format_root_node=format_root_node,
                          dist_formatter=dist_formatter,
                          support_formatter=support_formatter,
                          name_formatter=name_formatter)
    if outfile is None:
        return newick
    with open(outfile, "w") as handle:
        handle.write(newick)
|
Returns the newick representation of current node. Several
arguments control the way in which extra data is shown for
every node:
Parameters:
-----------
features:
a list of feature names to be exported using the Extended Newick
Format (i.e. features=["name", "dist"]). Use an empty list to
export all available features in each node (features=[])
outfile:
writes the output to a given file
format:
defines the newick standard used to encode the tree.
format_root_node:
If True, it allows features and branch information from root node
to be exported as a part of the newick text string. For newick
compatibility reasons, this is False by default.
is_leaf_fn:
See :func:`TreeNode.traverse` for documentation.
**Example:**
t.get_newick(features=["species","name"], format=1)
|
entailment
|
def get_tree_root(self):
    """Return the absolute root node of the current tree structure."""
    node = self
    while node.up is not None:
        node = node.up
    return node
|
Returns the absolute root node of current tree structure.
|
entailment
|
def get_common_ancestor(self, *target_nodes, **kargs):
    """
    Returns the first common ancestor between this node and a given
    list of 'target_nodes'.

    ``kargs`` supports ``get_path=True`` to additionally return a dict
    mapping each target node to the set of nodes on its path to the
    root.

    **Examples:**
    t = tree.Tree("(((A:0.1, B:0.01):0.001, C:0.0001):1.0[&&NHX:name=common], (D:0.00001):0.000001):2.0[&&NHX:name=root];")
    A = t.get_descendants_by_name("A")[0]
    C = t.get_descendants_by_name("C")[0]
    common = A.get_common_ancestor(C)
    print common.name
    """
    get_path = kargs.get("get_path", False)

    # Accept a single collection argument as well as varargs.
    if len(target_nodes) == 1 and type(target_nodes[0]) \
            in set([set, tuple, list, frozenset]):
        target_nodes = target_nodes[0]

    # Convert node names into node instances
    target_nodes = _translate_nodes(self, *target_nodes)

    # If only one node is provided, use self as the second target
    if type(target_nodes) != list:
        target_nodes = [target_nodes, self]

    n2path = {}
    reference = []
    ref_node = None
    for n in target_nodes:
        current = n
        # Record every node on n's path to the root; the first target's
        # chain also becomes the ordered `reference` list.
        while current:
            n2path.setdefault(n, set()).add(current)
            if not ref_node:
                reference.append(current)
            current = current.up
        if not ref_node:
            ref_node = n

    # First node of the reference chain that appears in every other
    # target's path is the common ancestor.
    common = None
    for n in reference:
        broken = False
        for node, path in six.iteritems(n2path):
            if node is not ref_node and n not in path:
                broken = True
                break

        if not broken:
            common = n
            break
    if not common:
        raise TreeError("Nodes are not connected!")

    if get_path:
        return common, n2path
    else:
        return common
|
Returns the first common ancestor between this node and a given
list of 'target_nodes'.
**Examples:**
t = tree.Tree("(((A:0.1, B:0.01):0.001, C:0.0001):1.0[&&NHX:name=common], (D:0.00001):0.000001):2.0[&&NHX:name=root];")
A = t.get_descendants_by_name("A")[0]
C = t.get_descendants_by_name("C")[0]
common = A.get_common_ancestor(C)
print common.name
|
entailment
|
def iter_search_nodes(self, **conditions):
    """
    Search nodes in an iterative way. Matches are yielded as they are
    found, which avoids scanning the full tree topology before
    returning the first matches. Useful when dealing with huge trees.

    A node matches when it has every keyword attribute with an equal
    value; with no conditions, every node matches.
    """
    for node in self.traverse():
        if all(hasattr(node, attr) and getattr(node, attr) == value
               for attr, value in conditions.items()):
            yield node
|
Search nodes in an interative way. Matches are being yield as
they are being found. This avoids to scan the full tree
topology before returning the first matches. Useful when
dealing with huge trees.
|
entailment
|
def search_nodes(self, **conditions):
    """
    Returns the list of nodes matching a given set of conditions.

    **Example:**
    tree.search_nodes(dist=0.0, name="human")
    """
    return list(self.iter_search_nodes(**conditions))
|
Returns the list of nodes matching a given set of conditions.
**Example:**
tree.search_nodes(dist=0.0, name="human")
|
entailment
|
def get_distance(self, target, target2=None, topology_only=False):
    """
    Returns the distance between two nodes. If only one target is
    specified, it returns the distance between the target and the
    current node.

    Parameters:
    -----------
    target:
        a node within the same tree structure.
    target2:
        a node within the same tree structure. If not specified,
        current node is used as target2.
    topology_only:
        If set to True, distance will refer to the number of nodes
        between target and target2.

    Returns:
    --------
    branch length distance between target and target2. If topology_only
    flag is True, returns the number of nodes between target and target2.
    """
    if target2 is None:
        target2 = self
        root = self.get_tree_root()
    else:
        # is target node under current node?
        root = self

    target, target2 = _translate_nodes(root, target, target2)
    ancestor = root.get_common_ancestor(target, target2)

    # Sum both paths down from the common ancestor.
    dist = 0.0
    for n in [target2, target]:
        current = n
        while current != ancestor:
            if topology_only:
                # The endpoint `target` itself is not counted as a hop.
                if current!=target:
                    dist += 1
            else:
                dist += current.dist
            current = current.up
    return dist
|
Returns the distance between two nodes. If only one target is
specified, it returns the distance bewtween the target and the
current node.
Parameters:
-----------
target:
a node within the same tree structure.
target2:
a node within the same tree structure. If not specified,
current node is used as target2.
topology_only:
If set to True, distance will refer to the number of nodes
between target and target2.
Returns:
--------
branch length distance between target and target2. If topology_only
flag is True, returns the number of nodes between target and target2.
|
entailment
|
def get_farthest_node(self, topology_only=False):
    """
    Returns the node's farthest descendant or ancestor node, and the
    distance to it.

    :argument False topology_only: If set to True, distance
        between nodes will be referred to the number of nodes
        between them. In other words, topological distance will be
        used instead of branch length distances.

    :return: A tuple containing the farthest node referred to the
        current node and the distance to it.
    """
    # Init farthest node to current farthest leaf (the farthest node
    # in the subtree below self).
    farthest_node, farthest_dist = self.get_farthest_leaf(
        topology_only=topology_only)
    prev = self
    cdist = 0.0 if topology_only else prev.dist
    current = prev.up
    # Walk towards the root. At every ancestor, probe the subtrees
    # hanging from its OTHER children for a longer path; cdist tracks
    # the distance already accumulated from self up to `current`.
    while current is not None:
        for ch in current.children:
            if ch != prev:
                if not ch.is_leaf():
                    fnode, fdist = ch.get_farthest_leaf(
                        topology_only=topology_only)
                else:
                    fnode = ch
                    fdist = 0
                # Add the edge (or one node step) leading into ch.
                if topology_only:
                    fdist += 1.0
                else:
                    fdist += ch.dist
                if cdist+fdist > farthest_dist:
                    farthest_dist = cdist + fdist
                    farthest_node = fnode
        prev = current
        if topology_only:
            cdist += 1
        else:
            cdist += prev.dist
        current = prev.up
    return farthest_node, farthest_dist
|
Returns the node's farthest descendant or ancestor node, and the
distance to it.
:argument False topology_only: If set to True, distance
between nodes will be referred to the number of nodes
between them. In other words, topological distance will be
used instead of branch length distances.
:return: A tuple containing the farthest node referred to the
current node and the distance to it.
|
entailment
|
def get_farthest_leaf(self, topology_only=False, is_leaf_fn=None):
    """
    Return the node's farthest descendant node (always a leaf) and the
    distance to it.

    :argument False topology_only: If set to True, the distance is the
        number of nodes between them (topological distance) instead of
        the sum of branch lengths.
    :return: A tuple (farthest leaf, distance to it).
    """
    # Delegate to the combined closest/farthest helper and keep only
    # the "farthest" half of its four-tuple answer.
    extremes = self._get_farthest_and_closest_leaves(
        topology_only=topology_only, is_leaf_fn=is_leaf_fn)
    _, _, far_node, far_dist = extremes
    return far_node, far_dist
|
Returns node's farthest descendant node (which is always a leaf), and the
distance to it.
:argument False topology_only: If set to True, distance
between nodes will be referred to the number of nodes
between them. In other words, topological distance will be
used instead of branch length distances.
:return: A tuple containing the farthest leaf referred to the
current node and the distance to it.
|
entailment
|
def get_midpoint_outgroup(self):
    """
    Return the node that divides the current tree into two
    distance-balanced partitions (i.e. the outgroup to use for
    midpoint rooting).
    """
    # Find one end of the longest path in the tree: the leaf farthest
    # from the root, then the node farthest from that leaf.
    root = self.get_tree_root()
    nA, _ = root.get_farthest_leaf()
    _, A2B_dist = nA.get_farthest_node()
    middist = A2B_dist / 2.0
    # Climb from nA accumulating branch lengths; stop at the first
    # node whose cumulative distance passes the path midpoint.
    cdist = 0
    current = nA
    while current is not None:
        cdist += current.dist
        if cdist > middist:
            break
        else:
            current = current.up
    return current
|
Returns the node that divides the current tree into two
distance-balanced partitions.
|
entailment
|
def populate(self,
             size,
             names_library=None,
             reuse_names=False,
             random_branches=False,
             branch_range=(0, 1),
             support_range=(0, 1)):
    """
    Generates a random topology by populating current node.

    :argument None names_library: If provided, names library
        (list, set, dict, etc.) will be used to name nodes.
    :argument False reuse_names: If True, node names will not be
        necessarily unique, which makes the process a bit more
        efficient.
    :argument False random_branches: If True, branch distances and support
        values will be randomized.
    :argument (0,1) branch_range: If random_branches is True, this
        range of values will be used to generate random distances.
    :argument (0,1) support_range: If random_branches is True,
        this range of values will be used to generate random branch
        support values.
    """
    NewNode = self.__class__
    # If this node already has several children, push them under a
    # connector node so the random growth starts from a fresh sibling.
    if len(self.children) > 1:
        connector = NewNode()
        for ch in self.get_children():
            ch.detach()
            connector.add_child(child = ch)
        root = NewNode()
        self.add_child(child = connector)
        self.add_child(child = root)
    else:
        root = self
    # Grow a binary tree by repeatedly splitting a node taken from
    # either end of the deque (randomly, to vary tree shape).
    next_deq = deque([root])
    for i in range(size-1):
        if random.randint(0, 1):
            p = next_deq.pop()
        else:
            p = next_deq.popleft()
        c1 = p.add_child()
        c2 = p.add_child()
        next_deq.extend([c1, c2])
        if random_branches:
            c1.dist = random.uniform(*branch_range)
            c2.dist = random.uniform(*branch_range)
            # BUGFIX: supports were previously drawn from branch_range,
            # leaving the support_range argument unused.
            c1.support = random.uniform(*support_range)
            c2.support = random.uniform(*support_range)
        else:
            c1.dist = 1.0
            c2.dist = 1.0
            c1.support = 1.0
            c2.support = 1.0
    # next_deq now contains the leaf nodes; assign them names.
    charset = "abcdefghijklmnopqrstuvwxyz"
    if names_library:
        names_library = deque(names_library)
    else:
        avail_names = itertools.combinations_with_replacement(charset, 10)
    for n in next_deq:
        if names_library:
            if reuse_names:
                tname = random.sample(names_library, 1)[0]
            else:
                # NOTE(review): raises IndexError if names_library has
                # fewer entries than leaves -- confirm callers guarantee
                # enough names.
                tname = names_library.pop()
        else:
            tname = ''.join(next(avail_names))
        n.name = tname
|
Generates a random topology by populating current node.
:argument None names_library: If provided, names library
(list, set, dict, etc.) will be used to name nodes.
:argument False reuse_names: If True, node names will not be
necessarily unique, which makes the process a bit more
efficient.
:argument False random_branches: If True, branch distances and support
values will be randomized.
:argument (0,1) branch_range: If random_branches is True, this
range of values will be used to generate random distances.
:argument (0,1) support_range: If random_branches is True,
this range of values will be used to generate random branch
support values.
|
entailment
|
def set_outgroup(self, outgroup):
    """
    Sets a descendant node as the outgroup of a tree. This function
    can be used to root a tree or even an internal node.

    Parameters:
    -----------
    outgroup:
        a node instance (or name) within the same tree structure that
        will be used as a basal node.
    """
    outgroup = _translate_nodes(self, outgroup)
    if self == outgroup:
        ##return
        ## why raise an error for this?
        raise TreeError("Cannot set myself as outgroup")
    parent_outgroup = outgroup.up
    # Detects (sub)tree root: climb to the child of self that contains
    # the outgroup.
    n = outgroup
    while n.up is not self:
        n = n.up
    # If outgroup is a child from root, but with more than one
    # sister nodes, creates a new node to group them
    self.children.remove(n)
    if len(self.children) != 1:
        down_branch_connector = self.__class__()
        down_branch_connector.dist = 0.0
        down_branch_connector.support = n.support
        for ch in self.get_children():
            down_branch_connector.children.append(ch)
            ch.up = down_branch_connector
            self.children.remove(ch)
    else:
        down_branch_connector = self.children[0]
    # Connects down branch to myself or to outgroup.
    # (Spanish variable names kept: quien_va_ser_padre = "who will
    # become the parent", quien_va_ser_hijo = "who will become the
    # child", quien_fue_padre = "who was the parent".)
    quien_va_ser_padre = parent_outgroup
    if quien_va_ser_padre is not self:
        # Parent-child swapping: walk from the outgroup's parent up to
        # the old root, reversing every edge on the way and shifting
        # each edge's dist/support one step towards the new root.
        quien_va_ser_hijo = quien_va_ser_padre.up
        quien_fue_padre = None
        buffered_dist = quien_va_ser_padre.dist
        buffered_support = quien_va_ser_padre.support
        while quien_va_ser_hijo is not self:
            quien_va_ser_padre.children.append(quien_va_ser_hijo)
            quien_va_ser_hijo.children.remove(quien_va_ser_padre)
            buffered_dist2 = quien_va_ser_hijo.dist
            buffered_support2 = quien_va_ser_hijo.support
            quien_va_ser_hijo.dist = buffered_dist
            quien_va_ser_hijo.support = buffered_support
            buffered_dist = buffered_dist2
            buffered_support = buffered_support2
            quien_va_ser_padre.up = quien_fue_padre
            quien_fue_padre = quien_va_ser_padre
            quien_va_ser_padre = quien_va_ser_hijo
            quien_va_ser_hijo = quien_va_ser_padre.up
        quien_va_ser_padre.children.append(down_branch_connector)
        down_branch_connector.up = quien_va_ser_padre
        quien_va_ser_padre.up = quien_fue_padre
        down_branch_connector.dist += buffered_dist
        outgroup2 = parent_outgroup
        parent_outgroup.children.remove(outgroup)
        outgroup2.dist = 0
    else:
        outgroup2 = down_branch_connector
    outgroup.up = self
    outgroup2.up = self
    # outgroup is always the first children. Some function may
    # trust on this fact, so do not change this.
    self.children = [outgroup,outgroup2]
    # Split the root branch length evenly between the two sides.
    middist = (outgroup2.dist + outgroup.dist)/2
    outgroup.dist = middist
    outgroup2.dist = middist
    outgroup2.support = outgroup.support
|
Sets a descendant node as the outgroup of a tree. This function
can be used to root a tree or even an internal node.
Parameters:
-----------
outgroup:
a node instance within the same tree structure that will be
used as a basal node.
|
entailment
|
def unroot(self):
    """
    Unroot the current node. Intended to be used on the absolute tree
    root, but it can also be applied to any other internal node: it
    converts a bifurcating split into a multifurcation by deleting one
    of the two internal children.
    """
    # Only a binary split can (and needs to) be unrooted.
    if len(self.children) != 2:
        return
    first, second = self.children
    if not first.is_leaf():
        first.delete()
    elif not second.is_leaf():
        second.delete()
    else:
        raise TreeError("Cannot unroot a tree with only two leaves")
|
Unroots current node. This function is expected to be used on
the absolute tree root node, but it can be also be applied to
any other internal node. It will convert a split into a
multifurcation.
|
entailment
|
def _asciiArt(self, char1='-', show_internal=True, compact=False, attributes=None):
    """
    Returns the ASCII representation of the tree as a
    (list-of-lines, index-of-anchor-line) tuple.
    Code based on the PyCogent GPL project.
    """
    if not attributes:
        attributes = ["name"]
    # toytree edit:
    # removed six dependency for map with comprehension
    # node_name = ', '.join(map(str, [getattr(self, v) for v in attributes if hasattr(self, v)]))
    _attrlist = [getattr(self, v) for v in attributes if hasattr(self, v)]
    node_name = ", ".join([str(i) for i in _attrlist])
    # LEN: width of the horizontal stem; PAD/PA: blank prefixes used
    # to align child sub-drawings.
    LEN = max(3, len(node_name) if not self.children or show_internal else 3)
    PAD = ' ' * LEN
    PA = ' ' * (LEN-1)
    if not self.is_leaf():
        mids = []
        result = []
        for c in self.children:
            # Choose the connector char by the child's position.
            if len(self.children) == 1:
                char2 = '/'
            elif c is self.children[0]:
                char2 = '/'
            elif c is self.children[-1]:
                char2 = '\\'
            else:
                char2 = '-'
            (clines, mid) = c._asciiArt(char2, show_internal, compact, attributes)
            # Remember where each child's anchor line lands in `result`.
            mids.append(mid+len(result))
            result.extend(clines)
            if not compact:
                result.append('')
        if not compact:
            result.pop()
        # Draw the vertical bar spanning from first to last child
        # anchor, and the horizontal stem at the midpoint.
        (lo, hi, end) = (mids[0], mids[-1], len(result))
        prefixes = [PAD] * (lo+1) + [PA+'|'] * (hi-lo-1) + [PAD] * (end-hi)
        mid = int((lo + hi) / 2)
        prefixes[mid] = char1 + '-'*(LEN-2) + prefixes[mid][-1]
        result = [p+l for (p,l) in zip(prefixes, result)]
        if show_internal:
            # Overwrite part of the stem with this node's label.
            stem = result[mid]
            result[mid] = stem[0] + node_name + stem[len(node_name)+1:]
        return (result, mid)
    else:
        return ([char1 + '-' + node_name], 0)
|
Returns the ASCII representation of the tree.
Code based on the PyCogent GPL project.
|
entailment
|
def get_ascii(self, show_internal=True, compact=False, attributes=None):
    """
    Return a string containing an ascii drawing of the tree.

    Parameters:
    -----------
    show_internal:
        include internal edge names.
    compact:
        use exactly one line per tip.
    attributes:
        A list of node attributes to show in the ASCII representation.
    """
    drawing, _ = self._asciiArt(show_internal=show_internal,
                                compact=compact,
                                attributes=attributes)
    # Leading empty element reproduces the original leading newline.
    return "\n".join([""] + drawing)
|
Returns a string containing an ascii drawing of the tree.
Parameters:
-----------
show_internal:
include internal edge names.
compact:
use exactly one line per tip.
attributes:
A list of node attributes to shown in the ASCII representation.
|
entailment
|
def ladderize(self, direction=0):
    """
    Sort the branches of a given tree (swapping children nodes)
    according to the size of each partition. Returns the number of
    leaves under this node.
    """
    if self.is_leaf():
        return 1
    # Recursively ladderize each child and record its subtree size.
    sizes = {}
    for child in self.get_children():
        sizes[child] = child.ladderize(direction=direction)
    self.children.sort(key=sizes.get)
    if direction == 1:
        self.children.reverse()
    return sum(sizes.values())
|
Sort the branches of a given tree (swapping children nodes)
according to the size of each partition.
|
entailment
|
def sort_descendants(self, attr="name"):
    """
    Sort the branches of the tree by comparing, at each internal node,
    the sorted list of ``attr`` values carried by the leaves under each
    child. This ensures that trees with the same leaf names are always
    ordered in the same way, regardless of their original branch order.
    Note that if duplicated values are present, extra criteria should
    be added to fully determine the order.
    """
    # Cache the per-node leaf attribute lists once, instead of
    # re-traversing the subtree for every comparison.
    node2content = self.get_cached_content(store_attr=attr, container_type=list)
    for n in self.traverse():
        if not n.is_leaf():
            n.children.sort(key=lambda x: str(sorted(node2content[x])))
|
This function sort the branches of a given tree by
considerening node names. After the tree is sorted, nodes are
labeled using ascendent numbers. This can be used to ensure
that nodes in a tree with the same node names are always
labeled in the same way. Note that if duplicated names are
present, extra criteria should be added to sort nodes.
Unique id is stored as a node._nid attribute
|
entailment
|
def get_cached_content(self, store_attr=None, container_type=set, _store=None):
    """
    Return a dictionary mapping each node under this tree to the
    preloaded content (leaves, or a leaf attribute) of its subtree.
    Such a dictionary is intended to work as a cache for operations
    that require many traversal operations.

    Parameters:
    -----------
    store_attr:
        Specifies the node attribute that should be cached (i.e. name,
        distance, etc.). When None, the whole node instance is cached.
    container_type:
        Container used to aggregate leaf values: set, list, or a
        subclass of either.
    _store: (internal use)
    """
    if _store is None:
        _store = {}
    # Post-order: fill the cache for every child subtree first.
    for ch in self.children:
        ch.get_cached_content(store_attr=store_attr,
                              container_type=container_type,
                              _store=_store)
    if self.children:
        val = container_type()
        for ch in self.children:
            # isinstance (rather than an exact type check) so that
            # subclasses of list/set are populated too; the original
            # exact checks silently produced empty containers for them.
            if isinstance(val, list):
                val.extend(_store[ch])
            elif isinstance(val, set):
                val.update(_store[ch])
        _store[self] = val
    else:
        if store_attr is None:
            val = self
        else:
            val = getattr(self, store_attr)
        _store[self] = container_type([val])
    return _store
|
Returns a dictionary pointing to the preloaded content of each
internal node under this tree. Such a dictionary is intended
to work as a cache for operations that require many traversal
operations.
Parameters:
-----------
store_attr:
Specifies the node attribute that should be cached (i.e. name,
distance, etc.). When none, the whole node instance is cached.
_store: (internal use)
|
entailment
|
def robinson_foulds(self,
                    t2,
                    attr_t1="name",
                    attr_t2="name",
                    unrooted_trees=False,
                    expand_polytomies=False,
                    polytomy_size_limit=5,
                    skip_large_polytomies=False,
                    correct_by_polytomy_size=False,
                    min_support_t1=0.0,
                    min_support_t2=0.0):
    """
    Returns the Robinson-Foulds symmetric distance between current
    tree and a different tree instance.

    Parameters:
    -----------
    t2:
        reference tree
    attr_t1:
        Compare trees using a custom node attribute as a node name.
    attr_t2:
        Compare trees using a custom node attribute as a node name in target tree.
    unrooted_trees:
        If True, consider trees as unrooted.
    expand_polytomies:
        If True, all polytomies in the reference and target tree will be
        expanded into all possible binary trees. Robinson-foulds distance
        will be calculated between all tree combinations and the minimum
        value will be returned.
        See also, :func:`NodeTree.expand_polytomy`.

    Returns:
    --------
    (rf, rf_max, common_attrs, names, edges_t1, edges_t2,
     discarded_edges_t1, discarded_edges_t2)
    """
    ref_t = self
    target_t = t2
    # Validate mutually-incompatible flag combinations up front.
    if not unrooted_trees and (len(ref_t.children) > 2 or len(target_t.children) > 2):
        raise TreeError("Unrooted tree found! You may want to activate the unrooted_trees flag.")
    if expand_polytomies and correct_by_polytomy_size:
        raise TreeError("expand_polytomies and correct_by_polytomy_size are mutually exclusive.")
    if expand_polytomies and unrooted_trees:
        raise TreeError("expand_polytomies and unrooted_trees arguments cannot be enabled at the same time")
    # Only leaves present in both trees take part in the comparison.
    attrs_t1 = set([getattr(n, attr_t1) for n in ref_t.iter_leaves() if hasattr(n, attr_t1)])
    attrs_t2 = set([getattr(n, attr_t2) for n in target_t.iter_leaves() if hasattr(n, attr_t2)])
    common_attrs = attrs_t1 & attrs_t2
    # release mem
    attrs_t1, attrs_t2 = None, None
    # Check for duplicated items (is it necessary? can we optimize? what's the impact in performance?')
    size1 = len([True for n in ref_t.iter_leaves() if getattr(n, attr_t1, None) in common_attrs])
    size2 = len([True for n in target_t.iter_leaves() if getattr(n, attr_t2, None) in common_attrs])
    if size1 > len(common_attrs):
        raise TreeError('Duplicated items found in source tree')
    if size2 > len(common_attrs):
        raise TreeError('Duplicated items found in reference tree')
    if expand_polytomies:
        # Enumerate every binary resolution of each tree; the minimum
        # RF over all pairings is returned below.
        ref_trees = [
            TreeNode(nw) for nw in
            ref_t.expand_polytomies(
                map_attr=attr_t1,
                polytomy_size_limit=polytomy_size_limit,
                skip_large_polytomies=skip_large_polytomies
            )
        ]
        target_trees = [
            TreeNode(nw) for nw in
            target_t.expand_polytomies(
                map_attr=attr_t2,
                polytomy_size_limit=polytomy_size_limit,
                skip_large_polytomies=skip_large_polytomies,
            )
        ]
        attr_t1, attr_t2 = "name", "name"
    else:
        ref_trees = [ref_t]
        target_trees = [target_t]
    polytomy_correction = 0
    if correct_by_polytomy_size:
        # Each polytomy of k children hides k-2 unresolved bipartitions.
        corr1 = sum([0]+[len(n.children) - 2 for n in ref_t.traverse() if len(n.children) > 2])
        corr2 = sum([0]+[len(n.children) - 2 for n in target_t.traverse() if len(n.children) > 2])
        if corr1 and corr2:
            raise TreeError("Both trees contain polytomies! Try expand_polytomies=True instead")
        else:
            polytomy_correction = max([corr1, corr2])
    min_comparison = None
    for t1 in ref_trees:
        t1_content = t1.get_cached_content()
        t1_leaves = t1_content[t1]
        # Each edge is encoded as the sorted tuple of common leaf names
        # on one side (rooted) or on both sides (unrooted).
        if unrooted_trees:
            edges1 = set([
                tuple(sorted([tuple(sorted([getattr(n, attr_t1) for n in content if hasattr(n, attr_t1) and getattr(n, attr_t1) in common_attrs])),
                              tuple(sorted([getattr(n, attr_t1) for n in t1_leaves-content if hasattr(n, attr_t1) and getattr(n, attr_t1) in common_attrs]))]))
                for content in six.itervalues(t1_content)])
            edges1.discard(((),()))
        else:
            edges1 = set([
                tuple(sorted([getattr(n, attr_t1) for n in content if hasattr(n, attr_t1) and getattr(n, attr_t1) in common_attrs]))
                for content in six.itervalues(t1_content)])
            edges1.discard(())
        if min_support_t1:
            support_t1 = dict([
                (tuple(sorted([getattr(n, attr_t1) for n in content if hasattr(n, attr_t1) and getattr(n, attr_t1) in common_attrs])), branch.support)
                for branch, content in six.iteritems(t1_content)])
        for t2 in target_trees:
            t2_content = t2.get_cached_content()
            t2_leaves = t2_content[t2]
            if unrooted_trees:
                edges2 = set([
                    tuple(sorted([
                        tuple(sorted([getattr(n, attr_t2) for n in content if hasattr(n, attr_t2) and getattr(n, attr_t2) in common_attrs])),
                        tuple(sorted([getattr(n, attr_t2) for n in t2_leaves-content if hasattr(n, attr_t2) and getattr(n, attr_t2) in common_attrs]))]))
                    for content in six.itervalues(t2_content)])
                edges2.discard(((),()))
            else:
                edges2 = set([
                    tuple(sorted([getattr(n, attr_t2) for n in content if hasattr(n, attr_t2) and getattr(n, attr_t2) in common_attrs]))
                    for content in six.itervalues(t2_content)])
                edges2.discard(())
            if min_support_t2:
                support_t2 = dict([
                    (tuple(sorted(([getattr(n, attr_t2) for n in content if hasattr(n, attr_t2) and getattr(n, attr_t2) in common_attrs]))), branch.support)
                    for branch, content in six.iteritems(t2_content)])
            # if a support value is passed as a constraint, discard lowly supported branches from the analysis
            discard_t1, discard_t2 = set(), set()
            if min_support_t1 and unrooted_trees:
                discard_t1 = set([p for p in edges1 if support_t1.get(p[0], support_t1.get(p[1], 999999999)) < min_support_t1])
            elif min_support_t1:
                discard_t1 = set([p for p in edges1 if support_t1[p] < min_support_t1])
            if min_support_t2 and unrooted_trees:
                discard_t2 = set([p for p in edges2 if support_t2.get(p[0], support_t2.get(p[1], 999999999)) < min_support_t2])
            elif min_support_t2:
                discard_t2 = set([p for p in edges2 if support_t2[p] < min_support_t2])
            #rf = len(edges1 ^ edges2) - (len(discard_t1) + len(discard_t2)) - polytomy_correction # poly_corr is 0 if the flag is not enabled
            #rf = len((edges1-discard_t1) ^ (edges2-discard_t2)) - polytomy_correction
            # the two root edges are never counted here, as they are always
            # present in both trees because of the common attr filters
            rf = len(((edges1 ^ edges2) - discard_t2) - discard_t1) - polytomy_correction
            if unrooted_trees:
                # thought this may work, but it does not, still I don't see why
                #max_parts = (len(common_attrs)*2) - 6 - len(discard_t1) - len(discard_t2)
                max_parts = (len([p for p in edges1 - discard_t1 if len(p[0])>1 and len(p[1])>1]) +
                             len([p for p in edges2 - discard_t2 if len(p[0])>1 and len(p[1])>1]))
            else:
                # thought this may work, but it does not, still I don't see why
                #max_parts = (len(common_attrs)*2) - 4 - len(discard_t1) - len(discard_t2)
                # Otherwise we need to count the actual number of valid
                # partitions in each tree -2 is to avoid counting the root
                # partition of the two trees (only needed in rooted trees)
                max_parts = (len([p for p in edges1 - discard_t1 if len(p)>1]) +
                             len([p for p in edges2 - discard_t2 if len(p)>1])) - 2
            # print max_parts
            # Keep the best (minimum RF) pairing seen so far.
            if not min_comparison or min_comparison[0] > rf:
                min_comparison = [rf, max_parts, common_attrs, edges1, edges2, discard_t1, discard_t2]
    return min_comparison
|
Returns the Robinson-Foulds symmetric distance between current
tree and a different tree instance.
Parameters:
-----------
t2:
reference tree
attr_t1:
Compare trees using a custom node attribute as a node name.
attr_t2:
Compare trees using a custom node attribute as a node name in target tree.
attr_t2:
If True, consider trees as unrooted.
False expand_polytomies:
If True, all polytomies in the reference and target tree will be
expanded into all possible binary trees. Robinson-foulds distance
will be calculated between all tree combinations and the minimum
value will be returned.
See also, :func:`NodeTree.expand_polytomy`.
Returns:
--------
(rf, rf_max, common_attrs, names, edges_t1, edges_t2,
discarded_edges_t1, discarded_edges_t2)
|
entailment
|
def iter_edges(self, cached_content=None):
    """
    Iterate over the list of edges of a tree. Each edge is represented
    as a tuple of two elements, each containing the set of leaves
    separated by the edge.
    """
    if not cached_content:
        cached_content = self.get_cached_content()
    all_leaves = cached_content[self]
    # Only the cached leaf sets are needed, not the nodes they hang
    # from, so iterate values() instead of items().
    for side1 in six.itervalues(cached_content):
        yield (side1, all_leaves - side1)
|
Iterate over the list of edges of a tree. Each egde is represented as a
tuple of two elements, each containing the list of nodes separated by
the edge.
|
entailment
|
def get_topology_id(self, attr="name"):
    """
    Return a unique ID representing the topology of the current tree.
    Two trees with the same topology will produce the same id. If trees
    are unrooted, make sure that the root node is not binary or use the
    tree.unroot() function before generating the topology id.

    This is useful to detect the number of unique topologies over a
    bunch of trees, without requiring full distance methods.

    The id is, by default, calculated based on the terminal node's
    names. Any other node attribute could be used instead.
    """
    # Canonicalize every edge as a sorted pair of sorted leaf-attr
    # lists so the key is independent of child ordering.
    edge_keys = [
        sorted([sorted(getattr(node, attr) for node in side_a),
                sorted(getattr(node, attr) for node in side_b)])
        for side_a, side_b in self.get_edges()
    ]
    return md5(str(sorted(edge_keys)).encode('utf-8')).hexdigest()
|
Returns the unique ID representing the topology of the current tree.
Two trees with the same topology will produce the same id. If trees are
unrooted, make sure that the root node is not binary or use the
tree.unroot() function before generating the topology id.
This is useful to detect the number of unique topologies over a bunch
of trees, without requiring full distance methods.
The id is, by default, calculated based on the terminal node's names.
Any other node attribute could be used instead.
|
entailment
|
def check_monophyly(self,
                    values,
                    target_attr,
                    ignore_missing=False,
                    unrooted=False):
    """
    Returns True if a given target attribute is monophyletic under
    this node for the provided set of values.

    If not all values are represented in the current tree
    structure, a ValueError exception will be raised to warn that
    strict monophyly could never be reached (this behaviour can be
    avoided by enabling the `ignore_missing` flag).

    Parameters:
    -----------
    values:
        a set of values for which monophyly is expected.
    target_attr:
        node attribute being used to check monophyly (i.e. species for
        species trees, names for gene family trees, or any custom feature
        present in the tree).
    ignore_missing:
        Avoid raising an Exception when missing attributes are found.
    unrooted:
        If True, tree will be treated as unrooted, thus allowing to find
        monophyly even when current outgroup is splitting a monophyletic group.

    Returns:
    --------
    the following tuple
        IsMonophyletic (boolean),
        clade type ('monophyletic', 'paraphyletic' or 'polyphyletic'),
        leaves breaking the monophyly (set)
    """
    if type(values) != set:
        values = set(values)
    # This is the only time I traverse the tree, then I use cached
    # leaf content
    n2leaves = self.get_cached_content()
    # Raise an error if requested attribute values are not even present
    if ignore_missing:
        found_values = set([getattr(n, target_attr) for n in n2leaves[self]])
        missing_values = values - found_values
        values = values & found_values
    # Locate leaves matching requested attribute values
    targets = set([leaf for leaf in n2leaves[self]
                   if getattr(leaf, target_attr) in values])
    if not ignore_missing:
        if values - set([getattr(leaf, target_attr) for leaf in targets]):
            raise ValueError('The monophyly of the provided values could never be reached, as not all of them exist in the tree.'
                             ' Please check your target attribute and values, or set the ignore_missing flag to True')
    if unrooted:
        # Find the smallest edge side containing all targets; exact
        # match (same size) means monophyly in the unrooted sense.
        smallest = None
        for side1, side2 in self.iter_edges(cached_content=n2leaves):
            if targets.issubset(side1) and (not smallest or len(side1) < len(smallest)):
                smallest = side1
            elif targets.issubset(side2) and (not smallest or len(side2) < len(smallest)):
                smallest = side2
            if smallest is not None and len(smallest) == len(targets):
                break
        foreign_leaves = smallest - targets
    else:
        # Check monophyly with get_common_ancestor. Note that this
        # step does not require traversing the tree again because
        # targets are node instances instead of node names, and
        # get_common_ancestor function is smart enough to detect it
        # and avoid unnecessary traversing.
        common = self.get_common_ancestor(targets)
        observed = n2leaves[common]
        foreign_leaves = set([leaf for leaf in observed
                              if getattr(leaf, target_attr) not in values])
    if not foreign_leaves:
        return True, "monophyletic", foreign_leaves
    else:
        # if the requested attribute is not monophyletic in this
        # node, let's differentiate between poly and paraphyly.
        poly_common = self.get_common_ancestor(foreign_leaves)
        # if the common ancestor of all foreign leaves is self
        # contained, we have a paraphyly. Otherwise, polyphyly.
        polyphyletic = [leaf for leaf in poly_common if
                        getattr(leaf, target_attr) in values]
        if polyphyletic:
            return False, "polyphyletic", foreign_leaves
        else:
            return False, "paraphyletic", foreign_leaves
|
Returns True if a given target attribute is monophyletic under
this node for the provided set of values.
If not all values are represented in the current tree
structure, a ValueError exception will be raised to warn that
strict monophyly could never be reached (this behaviour can be
avoided by enabling the `ignore_missing` flag.
Parameters:
-----------
values:
a set of values for which monophyly is expected.
target_attr:
node attribute being used to check monophyly (i.e. species for
species trees, names for gene family trees, or any custom feature
present in the tree).
ignore_missing:
Avoid raising an Exception when missing attributes are found.
unrooted:
If True, tree will be treated as unrooted, thus allowing to find
monophyly even when current outgroup is spliting a monophyletic group.
Returns:
--------
the following tuple
IsMonophyletic (boolean),
clade type ('monophyletic', 'paraphyletic' or 'polyphyletic'),
leaves breaking the monophyly (set)
|
entailment
|
def get_monophyletic(self, values, target_attr):
    """
    Yield the nodes matching the provided monophyly criteria. For a
    node to be considered a match, all `target_attr` values within the
    node, and exclusively them, should be grouped.

    :param values: a set of values for which monophyly is expected.
    :param target_attr: node attribute being used to check monophyly
        (i.e. species for species trees, names for gene family trees).
    """
    if type(values) != set:
        values = set(values)
    node2vals = self.get_cached_content(store_attr=target_attr)

    def _is_match(node):
        # A match carries exactly the requested value set -- no more,
        # no less.
        return node2vals[node] == values

    # Treating matches as "leaves" stops the traversal at the highest
    # matching node of each clade.
    for node in self.iter_leaves(is_leaf_fn=_is_match):
        if _is_match(node):
            yield node
|
Returns a list of nodes matching the provided monophyly
criteria. For a node to be considered a match, all
`target_attr` values within and node, and exclusively them,
should be grouped.
:param values: a set of values for which monophyly is
expected.
:param target_attr: node attribute being used to check
monophyly (i.e. species for species trees, names for gene
family trees).
|
entailment
|
def expand_polytomies(self,
                      map_attr="name",
                      polytomy_size_limit=5,
                      skip_large_polytomies=False):
    """
    Given a tree with one or more polytomies, this function returns the
    list of all trees (in newick format) resulting from the combination of
    all possible solutions of the multifurcated nodes.

    .. warning:
        Please note that the number of possible binary trees grows
        exponentially with the number and size of polytomies. Using this
        function with large multifurcations is not feasible:

        polytomy size: 3   number of binary trees: 3
        polytomy size: 4   number of binary trees: 15
        polytomy size: 5   number of binary trees: 105
        polytomy size: 6   number of binary trees: 945
        polytomy size: 7   number of binary trees: 10395
        polytomy size: 8   number of binary trees: 135135
        polytomy size: 9   number of binary trees: 2027025

        http://ajmonline.org/2010/darwin.php
    """
    class TipTuple(tuple):
        # Marker type: a tuple representing an already-resolved
        # subtree, which add_leaf() must not decompose further.
        pass

    def add_leaf(tree, label):
        # Yield every tree obtained by attaching `label` at each
        # possible position of the nested-tuple `tree`.
        yield (label, tree)
        if not isinstance(tree, TipTuple) and isinstance(tree, tuple):
            for left in add_leaf(tree[0], label):
                yield (left, tree[1])
            for right in add_leaf(tree[1], label):
                yield (tree[0], right)

    def enum_unordered(labels):
        # Enumerate every unordered binary tree over `labels`.
        if len(labels) == 1:
            yield labels[0]
        else:
            for tree in enum_unordered(labels[1:]):
                for new_tree in add_leaf(tree, labels[0]):
                    yield new_tree

    # Post-order: build, for each node, the list of all binary
    # resolutions of its subtree as nested tuples.
    n2subtrees = {}
    for n in self.traverse("postorder"):
        if n.is_leaf():
            subtrees = [getattr(n, map_attr)]
        else:
            subtrees = []
            if len(n.children) > polytomy_size_limit:
                if skip_large_polytomies:
                    # Too large: keep the polytomy as-is (sealed in a
                    # TipTuple) instead of enumerating resolutions.
                    for childtrees in itertools.product(*[n2subtrees[ch] for ch in n.children]):
                        subtrees.append(TipTuple(childtrees))
                else:
                    raise TreeError("Found polytomy larger than current limit: %s" %n)
            else:
                for childtrees in itertools.product(*[n2subtrees[ch] for ch in n.children]):
                    subtrees.extend([TipTuple(subtree) for subtree in enum_unordered(childtrees)])
        n2subtrees[n] = subtrees
    # str() of the nested tuples yields newick-like notation.
    return ["%s;"%str(nw) for nw in n2subtrees[self]]
|
Given a tree with one or more polytomies, this functions returns the
list of all trees (in newick format) resulting from the combination of
all possible solutions of the multifurcated nodes.
.. warning:
Please note that the number of of possible binary trees grows
exponentially with the number and size of polytomies. Using this
function with large multifurcations is not feasible:
polytomy size: 3 number of binary trees: 3
polytomy size: 4 number of binary trees: 15
polytomy size: 5 number of binary trees: 105
polytomy size: 6 number of binary trees: 945
polytomy size: 7 number of binary trees: 10395
polytomy size: 8 number of binary trees: 135135
polytomy size: 9 number of binary trees: 2027025
http://ajmonline.org/2010/darwin.php
|
entailment
|
def resolve_polytomy(self,
                     default_dist=0.0,
                     default_support=0.0,
                     recursive=True):
    """
    Resolve all polytomies under current node by creating an
    arbitrary dicotomic structure among the affected nodes. This
    function randomly modifies current tree topology and should
    only be used for compatibility reasons (i.e. programs
    rejecting multifurcated node in the newick representation).

    :param 0.0 default_dist: artificial branch distance of new
        nodes.
    :param 0.0 default_support: artificial branch support of new
        nodes.
    :param True recursive: Resolve any polytomy under this
        node. When False, only current node will be checked and fixed.
    """
    def _resolve(node):
        # Convert one multifurcated node into a ladder of binary nodes.
        if len(node.children) > 2:
            children = list(node.children)
            node.children = []
            next_node = root = node
            # Build a chain of len(children)-2 connector nodes; each
            # connector is its parent's only (hence first) child.
            for i in range(len(children) - 2):
                next_node = next_node.add_child()
                next_node.dist = default_dist
                next_node.support = default_support
            next_node = root
            # Attach the original children along the chain, descending
            # one connector after each attachment except at the last
            # connector, which receives the final two children.
            # NOTE(review): the `!=` test compares by node equality;
            # assumes node equality is identity-like -- confirm.
            for ch in children:
                next_node.add_child(ch)
                if ch != children[-2]:
                    next_node = next_node.children[0]
    target = [self]
    if recursive:
        target.extend([n for n in self.get_descendants()])
    for n in target:
        _resolve(n)
|
Resolve all polytomies under current node by creating an
arbitrary dichotomous structure among the affected nodes. This
function randomly modifies current tree topology and should
only be used for compatibility reasons (i.e. programs
rejecting multifurcated node in the newick representation).
:param 0.0 default_dist: artificial branch distance of new
nodes.
:param 0.0 default_support: artificial branch support of new
nodes.
:param True recursive: Resolve any polytomy under this
node. When False, only current node will be checked and fixed.
|
entailment
|
def truncate_empty_lines(lines):
    """
    Remove all blank lines from above and below the text.

    We can't just use ``text.strip()`` because that would also remove
    the leading space for the table.

    Parameters
    ----------
    lines : list of str

    Returns
    -------
    lines : list of str
        The same list object, with blank (whitespace-only) lines popped
        from both ends in place. An empty or entirely blank input yields
        an empty list instead of raising ``IndexError``.
    """
    # Guard on `lines` so an empty/all-blank input terminates cleanly
    # rather than raising IndexError once everything has been popped.
    while lines and not lines[0].rstrip():
        lines.pop(0)
    while lines and not lines[-1].rstrip():
        lines.pop()
    return lines
|
Removes all empty lines from above and below the text.
We can't just use text.strip() because that would remove the leading
space for the table.
Parameters
----------
lines : list of str
Returns
-------
lines : list of str
The text lines without empty lines above or below
|
entailment
|
def jstimestamp_slow(dte):
    '''Convert a date or datetime object into a javascript timestamp
    (milliseconds since the Unix epoch).'''
    year, month, day, hour, minute, second = dte.timetuple()[:6]
    # Days since the epoch: ordinal of the first of the month relative
    # to the epoch ordinal, plus the zero-based day of month.
    days = date(year, month, 1).toordinal() - _EPOCH_ORD + day - 1
    total_seconds = ((days * 24 + hour) * 60 + minute) * 60 + second
    millis = 1000 * total_seconds
    if isinstance(dte, datetime):
        # 1 microsecond == 0.001 ms; this makes the result a float.
        millis += 0.001 * dte.microsecond
    return millis
|
Convert a date or datetime object into a javascript timestamp
|
entailment
|
def jstimestamp(dte):
    '''Convert a date or datetime object into a javascript timestamp
    (milliseconds since the Unix epoch).'''
    # Days since the epoch: ordinal of the first of the month relative
    # to the epoch ordinal, plus the zero-based day of month.
    days = date(dte.year, dte.month, 1).toordinal() - _EPOCH_ORD + dte.day - 1
    hours = 24 * days
    if not isinstance(dte, datetime):
        # Plain date: whole hours only (3600000 ms per hour).
        return 3600000 * hours
    seconds = ((hours + dte.hour) * 60 + dte.minute) * 60 + dte.second
    return 1000 * seconds + int(0.001 * dte.microsecond)
|
Convert a date or datetime object into a javascript timestamp.
|
entailment
|
def html2rst(html_string, force_headers=False, center_cells=False,
             center_headers=False):
    """
    Convert a string or html file to an rst table string.

    Parameters
    ----------
    html_string : str
        Either the html string, or the filepath to the html
    force_headers : bool
        Make the first row become headers, whether or not they are
        headers in the html file.
    center_cells : bool
        Whether or not to center the contents of the cells
    center_headers : bool
        Whether or not to center the contents of the header cells

    Returns
    -------
    str
        The html table converted to an rst grid table

    Notes
    -----
    This function **requires** BeautifulSoup_ to work.

    Example
    -------
    >>> html_text = '''
    ... <table>
    ...     <tr>
    ...         <th>
    ...             Header 1
    ...         </th>
    ...         <th>
    ...             Header 2
    ...         </th>
    ...         <th>
    ...             Header 3
    ...         </th>
    ...     <tr>
    ...         <td>
    ...             <p>This is a paragraph</p>
    ...         </td>
    ...         <td>
    ...             <ul>
    ...                 <li>List item 1</li>
    ...                 <li>List item 2</li>
    ...             </ul>
    ...         </td>
    ...         <td>
    ...             <ol>
    ...                 <li>Ordered 1</li>
    ...                 <li>Ordered 2</li>
    ...             </ol>
    ...         </td>
    ...     </tr>
    ... </table>
    ... '''
    >>> import dashtable
    >>> print(dashtable.html2rst(html_text))
    +---------------------+----------------+--------------+
    | Header 1            | Header 2       | Header 3     |
    +=====================+================+==============+
    | This is a paragraph | - List item 1  | #. Ordered 1 |
    |                     | - List item 2  | #. Ordered 2 |
    +---------------------+----------------+--------------+

    .. _BeautifulSoup: https://www.crummy.com/software/BeautifulSoup/
    """
    if os.path.isfile(html_string):
        # Argument is a path: read the markup from disk. The context
        # manager guarantees the handle is closed even if reading fails;
        # read() replaces the old readlines()+join round trip.
        with open(html_string, 'r', encoding='utf-8') as file:
            html_string = file.read()

    table_data, spans, use_headers = html2data(html_string)
    if table_data == '':
        return ''
    if force_headers:
        use_headers = True

    return data2rst(table_data, spans, use_headers, center_cells, center_headers)
|
Convert a string or html file to an rst table string.
Parameters
----------
html_string : str
Either the html string, or the filepath to the html
force_headers : bool
Make the first row become headers, whether or not they are
headers in the html file.
center_cells : bool
Whether or not to center the contents of the cells
center_headers : bool
Whether or not to center the contents of the header cells
Returns
-------
str
The html table converted to an rst grid table
Notes
-----
This function **requires** BeautifulSoup_ to work.
Example
-------
>>> html_text = '''
... <table>
... <tr>
... <th>
... Header 1
... </th>
... <th>
... Header 2
... </th>
... <th>
... Header 3
... </th>
... <tr>
... <td>
... <p>This is a paragraph</p>
... </td>
... <td>
... <ul>
... <li>List item 1</li>
... <li>List item 2</li>
... </ul>
... </td>
... <td>
... <ol>
... <li>Ordered 1</li>
... <li>Ordered 2</li>
... </ol>
... </td>
... </tr>
... </table>
... '''
>>> import dashtable
>>> print(dashtable.html2rst(html_text))
+---------------------+----------------+--------------+
| Header 1 | Header 2 | Header 3 |
+=====================+================+==============+
| This is a paragraph | - List item 1 | #. Ordered 1 |
| | - List item 2 | #. Ordered 2 |
+---------------------+----------------+--------------+
.. _BeautifulSoup: https://www.crummy.com/software/BeautifulSoup/
|
entailment
|
def make_span(row, column, extra_rows, extra_columns):
    """
    Create a list of rows and columns that will make up a span.

    Parameters
    ----------
    row : int
        The row of the first cell in the span
    column : int
        The column of the first cell in the span
    extra_rows : int
        The number of additional rows that make up the span
    extra_columns : int
        The number of additional columns that make up the span

    Returns
    -------
    span : list of lists of int
        A span is a list of [row, column] pairs that make up a span.
        Pairs may appear more than once in the list; the anchor cell
        [row, column] is always the first entry.
    """
    bottom = row + extra_rows
    pairs = [[row, column]]
    # First column of the span, top to bottom.
    pairs.extend([r, column] for r in range(row, bottom + 1))
    # Top and bottom rows of the span, left to right.
    for c in range(column, column + extra_columns + 1):
        pairs.extend(([row, c], [bottom, c]))
    return pairs
|
Create a list of rows and columns that will make up a span
Parameters
----------
row : int
The row of the first cell in the span
column : int
The column of the first cell in the span
extra_rows : int
The number of rows that make up the span
extra_columns : int
The number of columns that make up the span
Returns
-------
span : list of lists of int
A span is a list of [row, column] pairs that make up a span
|
entailment
|
def make_cell(table, span, widths, heights, use_headers):
    """
    Convert the contents of a span of the table to a grid table cell.

    Parameters
    ----------
    table : list of lists of str
        The table of rows containing strings to convert to a grid table
    span : list of lists of int
        List of [row, column] pairs that make up a span in the table
    widths : list of int
        List of the column widths of the table
    heights : list of int
        List of the heights of each row in the table
    use_headers : bool
        Whether or not to use headers in the table

    Returns
    -------
    cell : dashtable.data2rst.Cell
    """
    char_width = get_span_char_width(span, widths)
    char_height = get_span_char_height(span, heights)
    anchor_row, anchor_column = span[0]

    # Right-pad each text line to exactly char_width characters.
    lines = [
        line + " " * (char_width - len(line))
        for line in table[anchor_row][anchor_column].split("\n")
    ]
    # Bottom-pad with blank lines until the cell body is char_height tall.
    lines.extend([" " * char_width] * (char_height - len(lines)))

    # Header cells (anchored on row 0) are underlined with '=' per the
    # rst grid-table convention; everything else uses '-'.
    bottom_symbol = "=" if use_headers and anchor_row == 0 else "-"
    output = ["+" + "-" * char_width + "+"]
    output.extend("|" + lines[i] + "|" for i in range(char_height))
    output.append("+" + bottom_symbol * char_width + "+")

    return Cell(
        "\n".join(output),
        anchor_row,
        anchor_column,
        get_span_row_count(span),
        get_span_column_count(span),
    )
|
Convert the contents of a span of the table to a grid table cell
Parameters
----------
table : list of lists of str
The table of rows containing strings to convert to a grid table
span : list of lists of int
list of [row, column] pairs that make up a span in the table
widths : list of int
list of the column widths of the table
heights : list of int
list of the heights of each row in the table
use_headers : bool
Whether or not to use headers in the table
Returns
-------
cell : dashtable.data2rst.Cell
|
entailment
|
def init_app(self, app, **kwargs):
    """Initialize application object."""
    self.init_db(app, **kwargs)

    # Default Alembic configuration: migration scripts shipped with
    # invenio_db, plus version locations contributed by other packages
    # through the 'invenio_db.alembic' entry point group.
    script_location = pkg_resources.resource_filename(
        'invenio_db', 'alembic'
    )
    version_locations = [
        (entry.name, pkg_resources.resource_filename(
            entry.module_name, os.path.join(*entry.attrs)
        ))
        for entry in pkg_resources.iter_entry_points('invenio_db.alembic')
    ]
    app.config.setdefault('ALEMBIC', {
        'script_location': script_location,
        'version_locations': version_locations,
    })

    self.alembic.init_app(app)
    app.extensions['invenio-db'] = self
    app.cli.add_command(db_cmd)
|
Initialize application object.
|
entailment
|
def init_db(self, app, entry_point_group='invenio_db.models', **kwargs):
    """Initialize Flask-SQLAlchemy extension.

    Sets a default ``SQLALCHEMY_DATABASE_URI`` (an SQLite file inside
    the application's instance path), initializes Flask-SQLAlchemy and
    versioning support, and loads every model module registered under
    *entry_point_group* so that all mappers are configured.

    :param app: Flask application object.
    :param entry_point_group: Entry point group from which model
        modules are loaded; pass a falsy value to skip loading.
    :param kwargs: May contain ``db`` (a Flask-SQLAlchemy instance used
        instead of the module-level ``db``) and ``versioning_manager``
        (forwarded to ``init_versioning``).
    """
    # Setup SQLAlchemy
    app.config.setdefault(
        'SQLALCHEMY_DATABASE_URI',
        'sqlite:///' + os.path.join(app.instance_path, app.name + '.db')
    )
    app.config.setdefault('SQLALCHEMY_ECHO', False)

    # Initialize Flask-SQLAlchemy extension.
    database = kwargs.get('db', db)
    database.init_app(app)

    # Initialize versioning support (must happen before model classes
    # are imported below, so make_versioned() can instrument them).
    self.init_versioning(app, database, kwargs.get('versioning_manager'))

    # Initialize model bases by importing every registered model module.
    if entry_point_group:
        for base_entry in pkg_resources.iter_entry_points(
                entry_point_group):
            base_entry.load()

    # All models should be loaded by now.
    sa.orm.configure_mappers()

    # Ensure that versioning classes have been built.
    if app.config['DB_VERSIONING']:
        manager = self.versioning_manager
        if manager.pending_classes:
            # Versioned models exist but their version classes have not
            # been generated yet — build them unless already registered.
            if not versioning_models_registered(manager, database.Model):
                manager.builder.configure_versioned_classes()
        elif 'transaction' not in database.metadata.tables:
            # No versioned models at all: still create the transaction
            # table that SQLAlchemy-Continuum expects to exist.
            manager.declarative_base = database.Model
            manager.create_transaction_model()
            manager.plugins.after_build_tx_class(manager)
|
Initialize Flask-SQLAlchemy extension.
|
entailment
|
def init_versioning(self, app, database, versioning_manager=None):
    """Initialize the versioning support using SQLAlchemy-Continuum.

    ``DB_VERSIONING`` defaults to whether the ``sqlalchemy_continuum``
    distribution is installed; when versioning is disabled this method
    is a no-op.

    :param app: Flask application object.
    :param database: Flask-SQLAlchemy database instance.
    :param versioning_manager: Optional SQLAlchemy-Continuum versioning
        manager; defaults to Continuum's module-level manager.
    :raises RuntimeError: If ``DB_VERSIONING`` is explicitly enabled
        but the versioning extra is not installed.
    """
    try:
        pkg_resources.get_distribution('sqlalchemy_continuum')
    except pkg_resources.DistributionNotFound:  # pragma: no cover
        default_versioning = False
    else:
        default_versioning = True

    app.config.setdefault('DB_VERSIONING', default_versioning)

    if not app.config['DB_VERSIONING']:
        return

    # DB_VERSIONING was forced on without the optional dependency.
    if not default_versioning:  # pragma: no cover
        raise RuntimeError(
            'Please install extra versioning support first by running '
            'pip install invenio-db[versioning].'
        )

    # Now we can import SQLAlchemy-Continuum.
    from sqlalchemy_continuum import make_versioned
    from sqlalchemy_continuum import versioning_manager as default_vm
    from sqlalchemy_continuum.plugins import FlaskPlugin

    # Try to guess user model class: use invenio_accounts' 'User' when
    # that package is installed, unless explicitly configured.
    if 'DB_VERSIONING_USER_MODEL' not in app.config:  # pragma: no cover
        try:
            pkg_resources.get_distribution('invenio_accounts')
        except pkg_resources.DistributionNotFound:
            user_cls = None
        else:
            user_cls = 'User'
    else:
        user_cls = app.config.get('DB_VERSIONING_USER_MODEL')

    # NOTE(review): FlaskPlugin presumably ties Continuum transactions
    # to the Flask request context; only enabled when a user model
    # exists — confirm against SQLAlchemy-Continuum docs.
    plugins = [FlaskPlugin()] if user_cls else []

    # Call make_versioned() before your models are defined.
    self.versioning_manager = versioning_manager or default_vm
    make_versioned(
        user_cls=user_cls,
        manager=self.versioning_manager,
        plugins=plugins,
    )

    # Register models that have been loaded beforehand.
    builder = self.versioning_manager.builder

    for tbl in database.metadata.tables.values():
        builder.instrument_versioned_classes(
            database.mapper, get_class_by_table(database.Model, tbl)
        )
|
Initialize the versioning support using SQLAlchemy-Continuum.
|
entailment
|
def extract_table(html_string, row_count, column_count):
    """
    Convert an html string to a data table.

    Builds a ``row_count`` x ``column_count`` grid and fills it by
    walking the ``<tr>``/``<th>``/``<td>`` elements of the first
    ``<table>`` found. ``rowspan``/``colspan`` attributes fill the
    anchor cell with the converted text and every other covered cell
    with the empty string.

    Parameters
    ----------
    html_string : str
    row_count : int
    column_count : int

    Returns
    -------
    data_table : list of lists of str
        Returns ``''`` when no ``<table>`` element is present and
        ``None`` when BeautifulSoup cannot be imported.
    """
    try:
        from bs4 import BeautifulSoup
        from bs4.element import Tag
    except ImportError:
        # Best-effort dependency check: warn and bail out (returns None)
        # rather than crashing the caller.
        print("ERROR: You must have BeautifulSoup to use html2data")
        return

    #html_string = convertRichText(html_string)

    # Pre-size the grid with None so unassigned cells can be detected.
    data_table = []
    for row in range(0, row_count):
        data_table.append([])
        for column in range(0, column_count):
            data_table[-1].append(None)

    soup = BeautifulSoup(html_string, 'html.parser')
    table = soup.find('table')
    if not table:
        return ''
    trs = table.findAll('tr')
    if len(trs) == 0:
        return [['']]

    for tr in range(len(trs)):
        # Header cells take precedence; fall back to data cells.
        ths = trs[tr].findAll('th')
        if len(ths) == 0:
            tds = trs[tr].findAll('td')
        else:
            tds = ths

        if len(tds) == 0:
            # Empty row: pad with placeholder tags so indexing works.
            tds = []
            for i in range(0, column_count):
                tds.append(Tag("", name=""))

        for i in range(len(tds)):
            td = tds[i]
            # Next [row, column] not yet claimed by an earlier span.
            row, column = find_unassigned_table_cell(data_table)
            r_span_count = 1
            c_span_count = 1
            if td.has_attr('rowspan'):
                r_span_count = int(td['rowspan'])
            if td.has_attr('colspan'):
                c_span_count = int(td['colspan'])

            for row_prime in range(row, row + r_span_count):
                for column_prime in range(column, column + c_span_count):
                    if row_prime == row and column_prime == column:
                        # Anchor cell of the span keeps the converted
                        # (restructured) text.
                        items = []
                        for item in td.contents:
                            items.append(str(item))
                        string = ''.join(items).strip()
                        text = restructify(string).rstrip()
                        data_table[row_prime][column_prime] = text
                    else:
                        data_table[row_prime][column_prime] = ""

            if i + 1 < column_count and i == len(tds) - 1:
                # Row ended short of column_count: blank the remainder
                # of this row that no span has claimed.
                for x in range(len(tds), column_count):
                    if data_table[row][x] is None:
                        data_table[row][x] = ""

    # Any still-unassigned (or otherwise falsy) cells become "".
    for row in range(len(data_table)):
        for column in range(len(data_table[row])):
            if not data_table[row][column]:
                data_table[row][column] = ""

    return data_table
|
Convert an html string to data table
Parameters
----------
html_string : str
row_count : int
column_count : int
Returns
-------
data_table : list of lists of str
|
entailment
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.