_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q45700
authenticate
train
def authenticate():
    """
    Authenticate the user against GitHub and persist the OAuth token.

    Prompts for a username and password, verifies the credentials by
    fetching the user, creates an authorization with the configured scope,
    writes the resulting token to ``credentials_file`` and returns it.

    NOTE(review): this is Python 2 code (`print` statement, `raw_input`,
    `except Exception, e`).
    """
    print LOGIN_INIT_MESSAGE
    username = raw_input('{0}: '.format(LOGIN_USER_MESSAGE))
    password = None
    # Keep prompting until getpass() returns something; note that an empty
    # string is accepted because '' is not None.
    while password is None:
        password = getpass('Password for {0}: '.format(username))
    gh = login(username, password=password)
    try:
        # Fetching the user validates the credentials before authorizing.
        gh.user()
        instance = authorize(username, password, APP_SCOPE, APP_DESC, APP_URL)
    except Exception, e:
        # NOTE(review): catching and immediately re-raising is a no-op;
        # this try/except could be removed without changing behavior.
        raise e
    # Persist the token for later runs.
    with open(credentials_file, 'w') as f:
        f.write(instance.token)
    return instance.token
python
{ "resource": "" }
q45701
community_colors
train
def community_colors(n):
    """
    Return visually separable colors for ``n`` communities.

    :param n: Total number of communities (an int).
    :returns: A list of up to ``n`` color strings (capped at 12, the size of
        the 'Paired' qualitative scale) when ``n > 0``; a single random
        color string when ``n <= 0`` (kept for backward compatibility).
    """
    palette = cl.scales['12']['qual']['Paired']
    if n > 0:
        # Bug fix: the original shuffled the palette list in place, which
        # mutated the shared `cl.scales` data structure. Shuffle a copy.
        colors = list(palette)
        shuffle(colors)
        return colors[:n]
    return choice(palette)
python
{ "resource": "" }
q45702
login_as_bot
train
def login_as_bot():
    """
    Sign in to Plotly as the "octogrid" bot account when the user hasn't
    configured Plotly credentials of their own.
    """
    credentials_path = join(
        join(expanduser('~'), PLOTLY_DIRECTORY), PLOTLY_CREDENTIALS_FILENAME)
    has_own_credentials = False
    if isfile(credentials_path):
        with open(credentials_path, 'r') as handle:
            stored = loads(handle.read())
        # The user is considered authenticated only when both fields are
        # non-empty strings.
        has_own_credentials = (stored['username'] != '' and
                               stored['api_key'] != '')
    if not has_own_credentials:
        plotly.sign_in(BOT_USERNAME, BOT_API_KEY)
python
{ "resource": "" }
q45703
SlurmProvider.submit
train
def submit(self, command, blocksize, job_name="parsl.auto"):
    """Submit the command as a slurm job of blocksize parallel elements.

    Parameters
    ----------
    command : str
        Command to be made on the remote side.
    blocksize : int
        Not implemented.
    job_name : str
        Name for the job (must be unique).

    Returns
    -------
    None or str
        If at capacity, returns None; otherwise, a string identifier for the job
    """
    # Refuse to submit when the provider is already running its maximum
    # number of blocks.
    if self.provisioned_blocks >= self.max_blocks:
        logger.warn("Slurm provider '{}' is at capacity (no more blocks will be added)".format(self.label))
        return None

    # The timestamp makes the job name (and the submit-script filename) unique.
    job_name = "{0}.{1}".format(job_name, time.time())

    script_path = "{0}/{1}.submit".format(self.script_dir, job_name)
    script_path = os.path.abspath(script_path)

    logger.debug("Requesting one block with {} nodes".format(self.nodes_per_block))

    # Values substituted into the sbatch submit-script template.
    job_config = {}
    job_config["submit_script_dir"] = self.channel.script_dir
    job_config["nodes"] = self.nodes_per_block
    job_config["tasks_per_node"] = self.tasks_per_node
    job_config["walltime"] = wtime_to_minutes(self.walltime)
    job_config["overrides"] = self.overrides
    job_config["partition"] = self.partition
    job_config["user_script"] = command

    # Wrap the command
    # NOTE(review): this immediately overwrites the plain `command`
    # assigned just above; the first assignment is redundant.
    job_config["user_script"] = self.launcher(command, self.tasks_per_node, self.nodes_per_block)

    logger.debug("Writing submit script")
    self._write_submit_script(template_string, script_path, job_name, job_config)

    # Stage the script onto the (possibly remote) channel and submit there.
    channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)

    retcode, stdout, stderr = super().execute_wait("sbatch {0}".format(channel_script_path))

    # Parse the job id out of sbatch's "Submitted batch job <id>" line;
    # stays None when submission failed or the line is absent.
    job_id = None
    if retcode == 0:
        for line in stdout.split('\n'):
            if line.startswith("Submitted batch job"):
                job_id = line.split("Submitted batch job")[1].strip()
                self.resources[job_id] = {'job_id': job_id, 'status': 'PENDING', 'blocksize': blocksize}
    else:
        print("Submission of command to scale_out failed")
        logger.error("Retcode:%s STDOUT:%s STDERR:%s", retcode, stdout.strip(), stderr.strip())
    return job_id
python
{ "resource": "" }
q45704
upload_to
train
def upload_to(instance, filename, prefix=None):
    """
    Auto upload function for File and Image fields.

    The stored filename is the md5 hex digest of the instance pk (or the
    current time for unsaved instances) concatenated with the original
    filename, sharded into two two-character directory levels.
    """
    extension = path.splitext(filename)[1]
    source = str(instance.pk or time()) + filename
    # We think that we use utf8 based OS file system
    hashed_name = md5(source.encode('utf8')).hexdigest() + extension
    parts = [instance._meta.app_label, instance._meta.module_name]
    if prefix:
        parts.append(prefix)
    parts.extend([hashed_name[:2], hashed_name[2:4], hashed_name])
    return path.join(*parts)
python
{ "resource": "" }
q45705
reverse_toctree
train
def reverse_toctree(app, doctree, docname):
    """Reverse the order of entries in the root toctree if 'glob' is used."""
    if docname != "changes":
        return
    # Only the first glob-based toctree on the page is reversed.
    for node in doctree.traverse():
        if node.tagname == "toctree" and node.get("glob"):
            node["entries"].reverse()
            break
python
{ "resource": "" }
q45706
treat
train
def treat(request_body):
    """
    Treat a notification and guarantee its authenticity.

    :param request_body: The request body in plain text.
    :type request_body: string

    :return: A safe APIResource
    :rtype: APIResource
    """
    # Python 3+ support
    if isinstance(request_body, six.binary_type):
        request_body = request_body.decode('utf-8')

    try:
        payload = json.loads(request_body)
    except ValueError:
        raise exceptions.UnknownAPIResource('Request body is malformed JSON.')

    unsafe_resource = APIResource.factory(payload)
    try:
        return unsafe_resource.get_consistent_resource()
    except AttributeError:
        raise exceptions.UnknownAPIResource('The API resource provided is invalid.')
python
{ "resource": "" }
q45707
LocalChannel.execute_no_wait
train
def execute_no_wait(self, cmd, walltime, envs=None):
    ''' Asynchronously execute a commandline string on the shell.

    Args:
        - cmd (string) : Commandline string to execute
        - walltime (int) : Walltime in seconds; currently unused.

    Kwargs:
        - envs (dict) : Environment variables to overlay on top of the
          channel's environment (default: none). The original signature
          used a mutable `{}` default, which is shared across calls.

    Returns:
        - pid : Process id of the spawned process
        - proc : The subprocess.Popen object

    Raises:
        Re-raises whatever subprocess.Popen raises when spawning fails
        (the original code fell through to `return pid, proc` with both
        names unbound, masking the real error with a NameError).
    '''
    current_env = copy.deepcopy(self._envs)
    current_env.update(envs or {})

    try:
        proc = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=self.userhome,
            env=current_env,
            shell=True,
            preexec_fn=os.setpgrp
        )
        pid = proc.pid
    except Exception as e:
        print("Caught exception : {0}".format(e))
        # Bug fix: pass the %-args separately instead of as a single tuple.
        logger.warning("Execution of command [%s] failed due to \n %s", cmd, e)
        raise
    return pid, proc
python
{ "resource": "" }
q45708
Client.trade_history
train
def trade_history(
        self, from_=None, count=None, from_id=None, end_id=None,
        order=None, since=None, end=None, pair=None):
    """
    Returns trade history.

    To use this method you need a privilege of the info key.

    :param int or None from_: trade ID, from which the display starts (default 0)
    :param int or None count: the number of trades for display (default 1000)
    :param int or None from_id: trade ID, from which the display starts (default 0)
    :param int or None end_id: trade ID on which the display ends (default inf.)
    :param str or None order: sorting (default 'DESC')
    :param int or None since: the time to start the display (default 0)
    :param int or None end: the time to end the display (default inf.)
    :param str or None pair: pair to be displayed (ex. 'btc_usd')
    """
    # Forward every filter to the private trade API unchanged.
    params = dict(from_=from_, count=count, from_id=from_id, end_id=end_id,
                  order=order, since=since, end=end, pair=pair)
    return self._trade_api_call('TradeHistory', **params)
python
{ "resource": "" }
q45709
Client.trans_history
train
def trans_history(
        self, from_=None, count=None, from_id=None, end_id=None,
        order=None, since=None, end=None):
    """
    Returns the history of transactions.

    To use this method you need a privilege of the info key.

    :param int or None from_: transaction ID, from which the display starts (default 0)
    :param int or None count: number of transaction to be displayed (default 1000)
    :param int or None from_id: transaction ID, from which the display starts (default 0)
    :param int or None end_id: transaction ID on which the display ends (default inf.)
    :param str or None order: sorting (default 'DESC')
    :param int or None since: the time to start the display (default 0)
    :param int or None end: the time to end the display (default inf.)
    """
    # Forward every filter to the private trade API unchanged.
    params = dict(from_=from_, count=count, from_id=from_id, end_id=end_id,
                  order=order, since=since, end=end)
    return self._trade_api_call('TransHistory', **params)
python
{ "resource": "" }
q45710
_get_python_version_string
train
def _get_python_version_string(): """ Returns a string representation of the Python version. :return: "2.7.8" if python version is 2.7.8. :rtype string """ version_info = sys.version_info return '.'.join(map(str, [version_info[0], version_info[1], version_info[2]]))
python
{ "resource": "" }
q45711
HttpClient._request
train
def _request(self, http_verb, url, data=None, authenticated=True):
    """
    Perform an HTTP request.

    See https://docs.python.org/3/library/json.html#json-to-py-table for the
    http response object.

    :param http_verb: the HTTP verb (GET, POST, PUT, …)
    :type http_verb: string
    :param url: the path to the resource queried
    :type url: string
    :param data: the request content
    :type data: dict
    :param authenticated: the request should be authenticated
    :type authenticated: bool

    :return: http response, http status
    :rtype tuple(object, int)

    :raises: an ``exceptions.HttpError`` subclass for non-2xx statuses and
             ``exceptions.UnexpectedAPIResponseException`` for non-JSON bodies.
    """
    # Identify the library version, Python runtime and underlying HTTP
    # request handler in the User-Agent header.
    user_agent = ('PayPlug-Python/{lib_version} (Python/{python_version}; '
                  '{request_library})'
                  .format(lib_version=__version__,
                          python_version=_get_python_version_string(),
                          request_library=self._request_handler.get_useragent_string()))
    headers = {
        'Accept': 'application/json',
        'Content-Type': 'application/json',
        'User-Agent': user_agent,
    }
    if authenticated:
        headers['Authorization'] = 'Bearer ' + self._secret_key
    requestor = self._request_handler()
    response, status, _ = requestor.do_request(http_verb, url, headers, data)
    # Since Python 3.2+, response body is a bytes-like object. We have to decode it to a string.
    if isinstance(response, six.binary_type):
        response = response.decode('utf-8')
    # Map every non-2xx status onto the matching HttpError subclass.
    if not 200 <= status < 300:
        raise exceptions.HttpError.map_http_status_to_exception(status)(http_response=response, http_status=status)
    try:
        response_object = json.loads(response)
    except ValueError:
        raise exceptions.UnexpectedAPIResponseException(http_response=response, http_status=status)
    return response_object, status
python
{ "resource": "" }
q45712
find_cache_directory
train
def find_cache_directory(remote):
    """
    Find the directory where temporary local checkouts are to be stored.

    :returns: The absolute pathname of a directory (a string).
    """
    # Prefer the system wide cache directory when it is writable, otherwise
    # fall back to the temporary directory; the percent-encoded remote
    # location becomes the subdirectory name.
    if os.access('/var/cache', os.W_OK):
        parent = '/var/cache/vcs-repo-mgr'
    else:
        parent = tempfile.gettempdir()
    return os.path.join(parent, urlparse.quote(remote, safe=''))
python
{ "resource": "" }
q45713
find_configured_repository
train
def find_configured_repository(name):
    """
    Find a version control repository defined by the user in a configuration file.

    :param name: The name of the repository (a string).
    :returns: A :class:`Repository` object.
    :raises: :exc:`~vcs_repo_mgr.exceptions.NoSuchRepositoryError` when the
             given repository name doesn't match any of the configured
             repositories.
    :raises: :exc:`~vcs_repo_mgr.exceptions.AmbiguousRepositoryNameError`
             when the given repository name is ambiguous (i.e. it matches
             multiple repository names).
    :raises: :exc:`~vcs_repo_mgr.exceptions.UnknownRepositoryTypeError` when
             a repository definition with an unknown type is encountered.

    The following configuration files are supported:

    1. ``/etc/vcs-repo-mgr.ini``
    2. ``~/.vcs-repo-mgr.ini``

    Repositories defined in the second file override repositories defined in
    the first. Here is an example of a repository definition:

    .. code-block:: ini

       [vcs-repo-mgr]
       type = git
       local = ~/projects/vcs-repo-mgr
       remote = git@github.com:xolox/python-vcs-repo-mgr.git
       bare = true
       release-scheme = tags
       release-filter = .*

    Three VCS types are currently supported: ``hg`` (``mercurial`` is also
    accepted), ``git`` and ``bzr`` (``bazaar`` is also accepted).
    """
    parser = configparser.RawConfigParser()
    # Load the system wide and per-user configuration files (when they
    # exist); entries read later override entries read earlier.
    for config_file in [SYSTEM_CONFIG_FILE, USER_CONFIG_FILE]:
        config_file = parse_path(config_file)
        if os.path.isfile(config_file):
            logger.debug("Loading configuration file (%s) ..", format_path(config_file))
            parser.read(config_file)
    # Match the requested name against section names, normalized on both sides.
    matching_repos = [r for r in parser.sections() if normalize_name(name) == normalize_name(r)]
    if not matching_repos:
        msg = "No repositories found matching the name '%s'!"
        raise NoSuchRepositoryError(msg % name)
    elif len(matching_repos) != 1:
        msg = "Multiple repositories found matching the name '%s'! (matches: %s)"
        raise AmbiguousRepositoryNameError(msg % (name, concatenate(map(repr, matching_repos))))
    else:
        kw = {}
        # Get the repository specific options.
        options = dict(parser.items(matching_repos[0]))
        vcs_type = options.get('type', '').lower()
        # Process the `local' directory pathname.
        local_path = options.get('local')
        if local_path:
            # Expand a leading tilde and/or environment variables.
            kw['local'] = parse_path(local_path)
        # Process the `bare' option.
        bare = options.get('bare', None)
        if bare is not None:
            # Default to bare=None but enable configuration
            # file(s) to enforce bare=True or bare=False.
            kw['bare'] = coerce_boolean(bare)
        # Process the `remote', `release_scheme' and `release_filter' options.
        # NOTE(review): this loop rebinds the `name` parameter; harmless here
        # because `name` is no longer needed afterwards, but easy to trip over.
        for name in 'remote', 'release-scheme', 'release-filter':
            value = options.get(name)
            if value is not None:
                kw[name.replace('-', '_')] = value
        return repository_factory(vcs_type, **kw)
python
{ "resource": "" }
q45714
Repository.release_scheme
train
def release_scheme(self, value):
    """Validate and store the release scheme."""
    if value in KNOWN_RELEASE_SCHEMES:
        set_property(self, 'release_scheme', value)
    else:
        msg = "Release scheme %r is not supported! (valid options are %s)"
        raise ValueError(msg % (value, concatenate(map(repr, KNOWN_RELEASE_SCHEMES))))
python
{ "resource": "" }
q45715
Repository.checkout
train
def checkout(self, revision=None, clean=False):
    """
    Update the working tree of the local repository to the specified revision.

    :param revision: The revision to check out (a string, defaults to
                     :attr:`default_revision`).
    :param clean: :data:`True` to discard changes in the working tree,
                  :data:`False` otherwise.
    """
    # The local repository must exist and must support a working tree.
    self.create()
    self.ensure_working_tree()
    # Update the working tree to the requested (or default) revision.
    target = revision if revision else self.default_revision
    logger.info("Checking out revision '%s' in %s ..", target, format_path(self.local))
    command = self.get_checkout_command(target, clean)
    self.context.execute(*command)
python
{ "resource": "" }
q45716
Repository.commit
train
def commit(self, message, author=None):
    """
    Commit changes to tracked files in the working tree.

    :param message: The commit message (a string).
    :param author: Override :attr:`author` (refer to :func:`coerce_author()`
                   for details on argument handling).
    """
    # The local repository must exist and must support a working tree.
    self.ensure_exists()
    self.ensure_working_tree()
    logger.info("Committing changes in %s: %s", format_path(self.local), message)
    effective_author = self.author if not author else coerce_author(author)
    command = self.get_commit_command(message, effective_author)
    self.context.execute(*command)
python
{ "resource": "" }
q45717
Repository.create_branch
train
def create_branch(self, branch_name):
    """
    Create a new branch based on the working tree's revision.

    :param branch_name: The name of the branch to create (a string).

    The new branch is checked out automatically; note that it may not
    actually exist until a commit has been made on the branch.
    """
    # The local repository must exist and must support a working tree.
    self.create()
    self.ensure_working_tree()
    # Create (and switch to) the new branch.
    logger.info("Creating branch '%s' in %s ..", branch_name, format_path(self.local))
    command = self.get_create_branch_command(branch_name)
    self.context.execute(*command)
python
{ "resource": "" }
q45718
Repository.create_tag
train
def create_tag(self, tag_name):
    """
    Create a new tag based on the working tree's revision.

    :param tag_name: The name of the tag to create (a string).
    """
    # The local repository must exist and must support a working tree.
    self.create()
    self.ensure_working_tree()
    # Create the tag against the working tree's revision.
    logger.info("Creating tag '%s' in %s ..", tag_name, format_path(self.local))
    command = self.get_create_tag_command(tag_name)
    self.context.execute(*command)
python
{ "resource": "" }
q45719
Repository.delete_branch
train
def delete_branch(self, branch_name, message=None, author=None):
    """
    Delete or close a branch in the local repository.

    :param branch_name: The name of the branch to delete or close (a string).
    :param message: The message to use when closing the branch requires a
                    commit (a string or :data:`None`, defaults to the string
                    "Closing branch NAME").
    :param author: Override :attr:`author` (refer to :func:`coerce_author()`
                   for details on argument handling).
    """
    # The local repository must exist.
    self.create()
    logger.info("Deleting branch '%s' in %s ..", branch_name, format_path(self.local))
    commit_author = coerce_author(author) if author else self.author
    commit_message = message or ("Closing branch %s" % branch_name)
    command = self.get_delete_branch_command(
        author=commit_author,
        message=commit_message,
        branch_name=branch_name,
    )
    self.context.execute(*command)
python
{ "resource": "" }
q45720
Repository.ensure_exists
train
def ensure_exists(self):
    """
    Make sure the local repository exists.

    :raises: :exc:`~exceptions.ValueError` when the local repository
             doesn't exist yet.
    """
    if self.exists:
        return
    raise ValueError("The local %s repository %s doesn't exist!"
                     % (self.friendly_name, format_path(self.local)))
python
{ "resource": "" }
q45721
Repository.ensure_hexadecimal_string
train
def ensure_hexadecimal_string(self, value, command=None):
    """
    Make sure the given value is a hexadecimal string.

    :param value: The value to check (a string).
    :param command: The command that produced the value
                    (a string or :data:`None`).
    :returns: The validated hexadecimal string.
    :raises: :exc:`~exceptions.ValueError` when `value` is not a
             hexadecimal string.
    """
    if HEX_PATTERN.match(value):
        return value
    template = "Expected a hexadecimal string, got '%s' instead!"
    if command:
        # Mention the offending command when we know it.
        template += " ('%s' gave unexpected output)"
        raise ValueError(template % (value, command))
    raise ValueError(template % value)
python
{ "resource": "" }
q45722
Repository.ensure_release_scheme
train
def ensure_release_scheme(self, expected_scheme):
    """
    Make sure the release scheme is correctly configured.

    :param expected_scheme: The expected release scheme (a string).
    :raises: :exc:`~exceptions.TypeError` when :attr:`release_scheme`
             doesn't match the expected release scheme.
    """
    if self.release_scheme == expected_scheme:
        return
    raise TypeError("Repository isn't using '%s' release scheme!" % expected_scheme)
python
{ "resource": "" }
q45723
Repository.export
train
def export(self, directory, revision=None):
    """
    Export the complete tree from the local version control repository.

    :param directory: The directory where the tree should be exported
                      (a string).
    :param revision: The revision to export (a string or :data:`None`,
                     defaults to :attr:`default_revision`).
    """
    # Make sure we're dealing with an absolute pathname (because a relative
    # pathname would be interpreted as relative to the repository's main
    # directory, which isn't necessarily what the caller expects).
    directory = os.path.abspath(directory)
    # Make sure the local repository exists.
    self.create()
    # Export the tree from the local repository.
    timer = Timer()
    revision = revision or self.default_revision
    logger.info("Exporting revision '%s' in %s to %s ..",
                revision, format_path(self.local), directory)
    self.context.execute('mkdir', '-p', directory)
    self.context.execute(*self.get_export_command(directory, revision))
    # Bug fix: the original debug message said "pull changes from remote"
    # (copy/pasted from pull()); this method exports a local tree.
    logger.debug("Took %s to export tree from %s repository.", timer, self.friendly_name)
python
{ "resource": "" }
q45724
Repository.find_remote
train
def find_remote(self, default=False, name=None, role=None):
    """
    Find a remote repository connected to the local repository.

    :param default: :data:`True` to only look for default remotes,
                    :data:`False` otherwise.
    :param name: The name of the remote to look for (a string or :data:`None`).
    :param role: A role that the remote should have (a string or :data:`None`).
    :returns: The first matching :class:`Remote` object or :data:`None`.
    """
    for candidate in self.known_remotes:
        if default and not candidate.default:
            continue
        if name and candidate.name != name:
            continue
        if role and role not in candidate.roles:
            continue
        return candidate
python
{ "resource": "" }
q45725
Repository.generate_control_field
train
def generate_control_field(self, revision=None):
    """
    Generate a Debian control file field referring to this repository and revision.

    :param revision: A reference to a revision, most likely the name of a
                     branch (a string, defaults to :attr:`default_revision`).
    :returns: A tuple with two strings: the name of the field and the value,
              e.g. ``('Vcs-Git', 'https://...#<revision id>')``.
    """
    # Prefer the remote location; fall back to the local pathname.
    location = self.remote or self.local
    field_value = "%s#%s" % (location, self.find_revision_id(revision))
    return self.control_field, field_value
python
{ "resource": "" }
q45726
Repository.interactive_merge_conflict_handler
train
def interactive_merge_conflict_handler(self, exception):
    """
    Give the operator a chance to interactively resolve merge conflicts.

    :param exception: An :exc:`~executor.ExternalCommandFailed` object.
    :returns: :data:`True` if the operator has interactively resolved any
              merge conflicts (and as such the merge error doesn't need to
              be propagated), :data:`False` otherwise.

    This method checks whether :data:`sys.stdin` is connected to a terminal
    to decide whether interaction with an operator is possible. If it is
    then an interactive terminal prompt is used to ask the operator to
    resolve the merge conflict(s). If the operator confirms the prompt, the
    merge error is swallowed instead of propagated. When :data:`sys.stdin`
    is not connected to a terminal or the operator denies the prompt the
    merge error is propagated.
    """
    if connected_to_terminal(sys.stdin):
        logger.info(compact("""
            It seems that I'm connected to a terminal so I'll give you a
            chance to interactively fix the merge conflict(s) in order to
            avoid propagating the merge error. Please mark or stage your
            changes but don't commit the result just yet (it will be done
            for you).
        """))
        # Keep prompting until the operator either resolves all conflicts
        # or asks for the error to be propagated.
        while True:
            if prompt_for_confirmation("Ignore merge error because you've resolved all conflicts?"):
                if self.merge_conflicts:
                    # Warn but re-prompt: conflicts are still present.
                    logger.warning("I'm still seeing merge conflicts, please double check! (%s)",
                                   concatenate(self.merge_conflicts))
                else:
                    # The operator resolved all conflicts.
                    return True
            else:
                # The operator wants us to propagate the error.
                break
    return False
python
{ "resource": "" }
q45727
Repository.is_feature_branch
train
def is_feature_branch(self, branch_name):
    """
    Try to determine whether a branch name refers to a feature branch.

    :param branch_name: The name of a branch (a string).
    :returns: :data:`True` if the branch name appears to refer to a feature
              branch, :data:`False` otherwise.

    Used by :func:`merge_up()` to decide whether a merged branch should be
    deleted or closed: the default branch and release branches are never
    considered feature branches.
    """
    # Checks are ordered from lightweight to heavyweight.
    if branch_name == self.default_revision:
        # The default branch is never a feature branch.
        return False
    if branch_name not in self.branches:
        # Invalid branch names can't be feature branch names.
        return False
    if self.release_scheme == 'branches' and branch_name in self.release_branches:
        # Release branches are not feature branches.
        return False
    # Other valid branches are considered feature branches.
    return True
python
{ "resource": "" }
q45728
Repository.merge_up
train
def merge_up(self, target_branch=None, feature_branch=None, delete=True, create=True):
    """
    Merge a change into one or more release branches and the default branch.

    :param target_branch: The name of the release branch where merging of
                          the feature branch starts (a string or
                          :data:`None`, defaults to :attr:`current_branch`).
    :param feature_branch: The feature branch to merge in (any value
                           accepted by :func:`coerce_feature_branch()`).
    :param delete: :data:`True` (the default) to delete or close the feature
                   branch after it is merged, :data:`False` otherwise.
    :param create: :data:`True` to automatically create the target branch
                   when it doesn't exist yet, :data:`False` otherwise.
    :returns: If `feature_branch` is given the global revision id of the
              feature branch is returned, otherwise the global revision id
              of the target branch (before any merges performed by
              :func:`merge_up()`) is returned. If the target branch is
              created by :func:`merge_up()` and `feature_branch` isn't given
              then :data:`None` is returned.
    :raises: The following exceptions can be raised:

             - :exc:`~exceptions.TypeError` when `target_branch` and
               :attr:`current_branch` are both :data:`None`.
             - :exc:`~exceptions.ValueError` when the given target branch
               doesn't exist (based on :attr:`branches`) and `create` is
               :data:`False`.
             - :exc:`~executor.ExternalCommandFailed` if a command fails.
    """
    timer = Timer()
    repository_was_created = self.create()
    revision_to_merge = None
    # Default the target branch to the current branch.
    if not target_branch:
        target_branch = self.current_branch
        if not target_branch:
            raise TypeError("You need to specify the target branch! (where merging starts)")
    # Parse the feature branch specification.
    feature_branch = coerce_feature_branch(feature_branch) if feature_branch else None
    # Make sure we start with a clean working tree.
    self.ensure_clean()
    # Make sure we're up to date with our upstream repository (if any).
    if not repository_was_created:
        self.pull()
    # Checkout or create the target branch.
    logger.debug("Checking if target branch exists (%s) ..", target_branch)
    if target_branch in self.branches:
        self.checkout(revision=target_branch)
        # Get the global revision id of the release branch we're about to merge.
        revision_to_merge = self.find_revision_id(target_branch)
    elif not create:
        raise ValueError("The target branch %r doesn't exist!" % target_branch)
    elif self.compiled_filter.match(target_branch):
        # Names matching the release filter become release branches.
        self.create_release_branch(target_branch)
    else:
        self.create_branch(target_branch)
    # Check if we need to merge in a feature branch.
    if feature_branch:
        if feature_branch.location:
            # Pull in the feature branch.
            self.pull(remote=feature_branch.location,
                      revision=feature_branch.revision)
        # Get the global revision id of the feature branch we're about to merge.
        revision_to_merge = self.find_revision_id(feature_branch.revision)
        # Merge in the feature branch.
        self.merge(revision=feature_branch.revision)
        # Commit the merge.
        self.commit(message="Merged %s" % feature_branch.expression)
    # We skip merging up through release branches when the target branch is
    # the default branch (in other words, there's nothing to merge up).
    if target_branch != self.default_revision:
        # Find the release branches in the repository.
        release_branches = [release.revision.branch for release in self.ordered_releases]
        logger.debug("Found %s: %s",
                     pluralize(len(release_branches), "release branch", "release branches"),
                     concatenate(release_branches))
        # Find the release branches after the target branch.
        later_branches = release_branches[release_branches.index(target_branch) + 1:]
        logger.info("Found %s after target branch (%s): %s",
                    pluralize(len(later_branches), "release branch", "release branches"),
                    target_branch,
                    concatenate(later_branches))
        # Determine the branches that need to be merged.
        branches_to_upmerge = later_branches + [self.default_revision]
        logger.info("Merging up from '%s' to %s: %s",
                    target_branch,
                    pluralize(len(branches_to_upmerge), "branch", "branches"),
                    concatenate(branches_to_upmerge))
        # Merge the feature branch up through the selected branches:
        # each branch is merged into its successor, pairwise.
        merge_queue = [target_branch] + branches_to_upmerge
        while len(merge_queue) >= 2:
            from_branch = merge_queue[0]
            to_branch = merge_queue[1]
            logger.info("Merging '%s' into '%s' ..", from_branch, to_branch)
            self.checkout(revision=to_branch)
            self.merge(revision=from_branch)
            self.commit(message="Merged %s" % from_branch)
            merge_queue.pop(0)
    # Check if we need to delete or close the feature branch.
    if delete and feature_branch and self.is_feature_branch(feature_branch.revision):
        # Delete or close the feature branch.
        self.delete_branch(
            branch_name=feature_branch.revision,
            message="Closing feature branch %s" % feature_branch.revision,
        )
    # Update the working tree to the default branch.
    self.checkout()
    logger.info("Done! Finished merging up in %s.", timer)
    return revision_to_merge
python
{ "resource": "" }
q45729
Repository.pull
train
def pull(self, remote=None, revision=None):
    """
    Pull changes from a remote repository into the local repository.

    :param remote: The location of a remote repository (a string or
                   :data:`None`).
    :param revision: A specific revision to pull (a string or :data:`None`).

    If used in combination with :class:`limit_vcs_updates` this won't
    perform redundant updates.
    """
    remote = remote or self.remote
    # Make sure the local repository exists.
    if self.create() and (remote == self.remote or not remote):
        # Don't waste time pulling from a remote repository that we just cloned.
        logger.info("Skipping pull from default remote because we just created the local %s repository.",
                    self.friendly_name)
        return
    # Make sure there is a remote repository to pull from.
    if not (remote or self.default_pull_remote):
        logger.info("Skipping pull (no default remote is configured).")
        return
    # Check if we're about to perform a redundant pull (the environment
    # variable is set by limit_vcs_updates).
    update_limit = int(os.environ.get(UPDATE_VARIABLE, '0'))
    if update_limit and self.last_updated >= update_limit:
        logger.info("Skipping pull due to update limit.")
        return
    # Pull the changes from the remote repository.
    timer = Timer()
    logger.info("Pulling changes from %s into local %s repository (%s) ..",
                remote or "default remote", self.friendly_name, format_path(self.local))
    self.context.execute(*self.get_pull_command(remote=remote, revision=revision))
    logger.debug("Took %s to pull changes from remote %s repository.", timer, self.friendly_name)
    # Record the pull time so limit_vcs_updates can skip redundant pulls.
    self.mark_updated()
python
{ "resource": "" }
q45730
Repository.push
train
def push(self, remote=None, revision=None):
    """
    Push changes from the local repository to a remote repository.

    :param remote: The location of a remote repository (a string or
                   :data:`None`).
    :param revision: A specific revision to push (a string or :data:`None`).

    .. warning:: Depending on the version control backend the push command
                 may fail when there are no changes to push. No attempt has
                 been made to make this behavior consistent between
                 implementations (although the thought has crossed my mind
                 and I'll likely revisit this in the future).
    """
    # Make sure the local repository exists.
    self.ensure_exists()
    # Make sure there is a remote repository to push to.
    if not (remote or self.remote or self.default_push_remote):
        logger.info("Skipping push (no default remote is configured).")
        # Bug fix: the original fell through and attempted the push anyway
        # after logging that it would be skipped (compare with pull()).
        return
    # Push the changes to the remote repository.
    timer = Timer()
    logger.info("Pushing changes from %s to %s ..",
                format_path(self.local),
                remote or self.remote or "default remote")
    self.context.execute(*self.get_push_command(remote, revision))
    logger.debug("Took %s to push changes to remote repository.", timer)
python
{ "resource": "" }
q45731
Repository.release_to_branch
train
def release_to_branch(self, release_id):
    """
    Shortcut to translate a release identifier to a branch name.

    :param release_id: A :attr:`Release.identifier` value (a string).
    :returns: A branch name (a string).
    :raises: :exc:`~exceptions.TypeError` when :attr:`release_scheme` isn't
             'branches'.
    """
    self.ensure_release_scheme('branches')
    release = self.releases[release_id]
    return release.revision.branch
python
{ "resource": "" }
q45732
Repository.release_to_tag
train
def release_to_tag(self, release_id):
    """
    Shortcut to translate a release identifier to a tag name.

    :param release_id: A :attr:`Release.identifier` value (a string).
    :returns: A tag name (a string).
    :raises: :exc:`~exceptions.TypeError` when :attr:`release_scheme`
             isn't 'tags'.
    """
    self.ensure_release_scheme('tags')
    release = self.releases[release_id]
    return release.revision.tag
python
{ "resource": "" }
q45733
Repository.select_release
train
def select_release(self, highest_allowed_release):
    """
    Select the newest release that is not newer than the given release.

    :param highest_allowed_release: The identifier of the release that
        sets the upper bound for the selection (a string).
    :returns: The selected release (the last matching entry of
        :attr:`ordered_releases`).
    :raises: :exc:`~vcs_repo_mgr.exceptions.NoMatchingReleasesError`
        when no matching releases are found.
    """
    upper_bound = natsort_key(highest_allowed_release)
    candidates = [
        release for release in self.ordered_releases
        if natsort_key(release.identifier) <= upper_bound
    ]
    if not candidates:
        raise NoMatchingReleasesError(
            "No releases below or equal to %r found in repository!"
            % highest_allowed_release)
    # ordered_releases is sorted, so the last candidate is the newest.
    return candidates[-1]
python
{ "resource": "" }
q45734
Repository.update_context
train
def update_context(self):
    """
    Try to ensure that external commands are executed in the local repository.

    When the directory given by :attr:`local` exists, the working
    directory of :attr:`context` is pointed at it so version control
    commands run inside the repository. When it doesn't exist the
    working directory option is removed instead, so external commands
    don't fail on a non-existing directory.
    """
    repo_exists = self.context.is_directory(self.local)
    options = self.context.options
    if repo_exists:
        # Run version control commands inside the local repository.
        options['directory'] = self.local
    else:
        # Avoid a working directory that doesn't exist.
        options.pop('directory', None)
python
{ "resource": "" }
q45735
enumerate
train
def enumerate(vendor_id=0, product_id=0):
    """
    Enumerate the HID Devices.

    Returns a generator that yields all of the HID devices attached to the
    system.

    :param vendor_id: Only return devices which match this vendor id
    :type vendor_id: int
    :param product_id: Only return devices which match this product id
    :type product_id: int
    :return: Generator that yields informations about attached HID devices
    :rval: generator(DeviceInfo)
    """
    # hid_free_enumeration() must be called on the *head* of the linked
    # list. The previous code freed the cursor after the walk finished,
    # which by then was NULL, leaking the whole enumeration.
    head = hidapi.hid_enumerate(vendor_id, product_id)
    info = head
    try:
        while info:
            yield DeviceInfo(info)
            info = info.next
    finally:
        # Free the list even when the consumer abandons the generator early.
        if head:
            hidapi.hid_free_enumeration(head)
python
{ "resource": "" }
q45736
Device.write
train
def write(self, data, report_id=b'\0'):
    """
    Write an Output report to a HID device.

    This will send the data on the first OUT endpoint, if one exists.
    If it does not, it will be sent the data through the Control
    Endpoint (Endpoint 0).

    :param data: The data to be sent
    :type data: str/bytes
    :param report_id: The Report ID to write to (default: 0x0); must be
        a length-1 bytes value, it is stored directly into the buffer.
    """
    self._check_device_status()
    # hidapi expects the report number as the first byte of the buffer,
    # so allocate one byte more than the payload.
    bufp = ffi.new("unsigned char[]", len(data)+1)
    buf = ffi.buffer(bufp, len(data)+1)
    buf[0] = report_id
    buf[1:] = data
    rv = hidapi.hid_write(self._device, bufp, len(data)+1)
    if rv == -1:
        raise IOError("Failed to write to HID device.")
python
{ "resource": "" }
q45737
Device.read
train
def read(self, length, timeout_ms=0, blocking=False):
    """
    Read an Input report from a HID device with timeout.

    Input reports are returned to the host through the `INTERRUPT IN`
    endpoint. The first byte will contain the Report number if the
    device uses numbered reports.

    By default reads are non-blocking, i.e. the method will return
    `None` if no data was available. Blocking reads can be enabled with
    :param blocking:. Additionally, a timeout for the read can be
    specified.

    :param length: The number of bytes to read. For devices with
                   multiple reports, make sure to read an extra byte for
                   the report number.
    :param timeout_ms: Timeout in miliseconds
    :type timeout_ms: int
    :param blocking: Block until data is available
    :return: the bytes read, or ``None`` when no data was available
    """
    self._check_device_status()
    bufp = ffi.new("unsigned char[]", length)
    # hidapi uses a timeout of -1 to mean "wait forever", so a blocking
    # read without an explicit timeout is mapped onto that.
    if not timeout_ms and blocking:
        timeout_ms = -1
    if timeout_ms:
        rv = hidapi.hid_read_timeout(self._device, bufp, length,
                                     timeout_ms)
    else:
        rv = hidapi.hid_read(self._device, bufp, length)
    if rv == -1:
        raise IOError("Failed to read from HID device: {0}"
                      .format(self._get_last_error_string()))
    elif rv == 0:
        # No report available (non-blocking read or timeout expired).
        return None
    else:
        # Copy out only the rv bytes the device actually returned.
        return ffi.buffer(bufp, rv)[:]
python
{ "resource": "" }
q45738
Device.get_manufacturer_string
train
def get_manufacturer_string(self):
    """
    Get the Manufacturer String from the HID device.

    :return: The Manufacturer String
    :rtype: unicode
    :raises IOError: when hidapi reports a failure (-1)
    """
    self._check_device_status()
    # 255-character wide-char scratch buffer (same convention as the
    # other string getters on this class).
    str_p = ffi.new("wchar_t[]", 255)
    rv = hidapi.hid_get_manufacturer_string(self._device, str_p, 255)
    if rv == -1:
        raise IOError("Failed to read manufacturer string from HID "
                      "device: {0}".format(self._get_last_error_string()))
    # ffi.string() decodes the NUL-terminated wchar_t buffer to unicode.
    return ffi.string(str_p)
python
{ "resource": "" }
q45739
Device.get_product_string
train
def get_product_string(self):
    """
    Get the Product String from the HID device.

    :return: The Product String
    :rtype: unicode
    :raises IOError: when hidapi reports a failure (-1)
    """
    self._check_device_status()
    # 255-character wide-char scratch buffer, as in the sibling getters.
    str_p = ffi.new("wchar_t[]", 255)
    rv = hidapi.hid_get_product_string(self._device, str_p, 255)
    if rv == -1:
        raise IOError("Failed to read product string from HID device: {0}"
                      .format(self._get_last_error_string()))
    return ffi.string(str_p)
python
{ "resource": "" }
q45740
Device.get_serial_number_string
train
def get_serial_number_string(self):
    """
    Get the Serial Number String from the HID device.

    :return: The Serial Number String
    :rtype: unicode
    :raises IOError: when hidapi reports a failure (-1)
    """
    self._check_device_status()
    # 255-character wide-char scratch buffer, as in the sibling getters.
    str_p = ffi.new("wchar_t[]", 255)
    rv = hidapi.hid_get_serial_number_string(self._device, str_p, 255)
    if rv == -1:
        raise IOError("Failed to read serial number string from HID "
                      "device: {0}".format(self._get_last_error_string()))
    return ffi.string(str_p)
python
{ "resource": "" }
q45741
Device.send_feature_report
train
def send_feature_report(self, data, report_id=0x0):
    """
    Send a Feature report to the device.

    Feature reports are sent over the Control endpoint as a
    Set_Report transfer.

    :param data: The data to send
    :type data: str/bytes
    :param report_id: The Report ID to send to
    :type report_id: int
    """
    self._check_device_status()
    # Report number goes in the first byte, payload after it.
    bufp = ffi.new("unsigned char[]", len(data)+1)
    buf = ffi.buffer(bufp, len(data)+1)
    # NOTE(review): the default here is the int 0x0 while write() uses
    # the bytes b'\0'; cffi buffer item assignment expects a one-byte
    # value, so confirm an int default actually works on Python 3.
    buf[0] = report_id
    buf[1:] = data
    # len(bufp) on a cffi array equals its item count, i.e. len(data)+1.
    rv = hidapi.hid_send_feature_report(self._device, bufp, len(bufp))
    if rv == -1:
        raise IOError("Failed to send feature report to HID device: {0}"
                      .format(self._get_last_error_string()))
python
{ "resource": "" }
q45742
Device.get_feature_report
train
def get_feature_report(self, report_id, length):
    """
    Get a feature report from the device.

    :param report_id: The Report ID of the report to be read
    :type report_id: int
    :param length: Number of payload bytes to read (excluding the
        report ID byte, which is allocated on top of this).
    :return: The report data
    :rtype: str/bytes
    """
    self._check_device_status()
    # One extra byte in front for the report number.
    bufp = ffi.new("unsigned char[]", length+1)
    buf = ffi.buffer(bufp, length+1)
    # NOTE(review): report_id is documented as an int; cffi buffer item
    # assignment expects a one-byte value (write() uses b'\0') — confirm.
    buf[0] = report_id
    rv = hidapi.hid_get_feature_report(self._device, bufp, length+1)
    if rv == -1:
        raise IOError("Failed to get feature report from HID device: {0}"
                      .format(self._get_last_error_string()))
    # NOTE(review): the full requested buffer is returned (minus the
    # report ID byte) even when rv indicates fewer bytes were read.
    return buf[1:]
python
{ "resource": "" }
q45743
Device.get_indexed_string
train
def get_indexed_string(self, idx):
    """
    Get a string from the device, based on its string index.

    :param idx: The index of the string to get
    :type idx: int
    :return: The string at the index
    :rtype: unicode
    :raises IOError: when hidapi reports a failure (-1)
    """
    self._check_device_status()
    # Bug fix: the previous code allocated a single wchar_t
    # (ffi.new("wchar_t*")) while telling hidapi the buffer was much
    # larger, inviting a buffer overflow. Use a real array sized like
    # the other string getters on this class.
    str_p = ffi.new("wchar_t[]", 255)
    rv = hidapi.hid_get_indexed_string(self._device, idx, str_p, 255)
    if rv == -1:
        # Bug fix: the error message used {0} twice, printing the index
        # where the error text should appear.
        raise IOError("Failed to read string with index {0} from HID "
                      "device: {1}"
                      .format(idx, self._get_last_error_string()))
    # Decode the NUL-terminated wide string, consistent with
    # get_product_string() and friends.
    return ffi.string(str_p)
python
{ "resource": "" }
q45744
Device.close
train
def close(self):
    """
    Close connection to HID device.

    Automatically run when a Device object is garbage-collected, though
    manual invocation is recommended.
    """
    self._check_device_status()
    hidapi.hid_close(self._device)
    # Drop the handle so _check_device_status() rejects further use.
    self._device = None
python
{ "resource": "" }
q45745
AdblockURLFilterMeta.load_raw_rules
train
def load_raw_rules(cls, url):
    """Load raw rules from url or package file.

    Tries to download the list first; any failure (server down, bad
    URL, HTTP error) falls back to the copy bundled with the package.

    :param url: URL of the filter list; its basename doubles as the
        bundled resource name (e.g. easylist.txt).
    :return: list of stripped rule lines.
    """
    raw_rules = []
    filename = url.split('/')[-1]  # e.g.: easylist.txt
    try:
        with closing(request.get(url, stream=True)) as file:
            file.raise_for_status()
            for rule in file.iter_lines():
                raw_rules.append(rule.strip())
        logger.info("Adblock online %s: %d", filename, len(raw_rules))
    except Exception:  # deliberate best-effort: fall back to bundled copy
        # Discard any partially downloaded rules so the bundled list
        # isn't appended to a truncated download.
        raw_rules = []
        with open(resource_filename('summary', filename), 'r') as file:
            for rule in file:
                raw_rules.append(rule.strip())
        logger.info("Adblock offline %s: %d", filename, len(raw_rules))
    return raw_rules
python
{ "resource": "" }
q45746
AdblockURLFilterMeta.get_all_rules
train
def get_all_rules(cls):
    "Load all available Adblock rules."
    # Imported lazily so the module loads without adblockparser installed.
    from adblockparser import AdblockRules
    sources = (config.ADBLOCK_EASYLIST_URL, config.ADBLOCK_EXTRALIST_URL)
    raw_rules = []
    for source in sources:
        raw_rules += cls.load_raw_rules(source)
    return AdblockRules(raw_rules)
python
{ "resource": "" }
q45747
NoImageFilter.get_image
train
def get_image(cls, url):
    """
    Returned Image instance has response url.
    This might be different than the url param because of redirects.

    Streams the response and feeds it chunk-by-chunk to a PIL parser,
    stopping as soon as the image header (size/format) is known.

    :param url: image URL to fetch.
    :raises cls.MaxBytesException: download exceeded IMAGE_MAX_BYTESIZE.
    :raises cls.ZeroBytesException: response body was empty.
    :raises cls.NoImageException: bytes received but no image detected.
    """
    from PIL.ImageFile import Parser as PILParser
    length = 0
    raw_image = None
    with closing(request.get(url, stream=True)) as response:
        response.raise_for_status()
        # Keep the final URL (after redirects) for the Image instance.
        response_url = response.url
        parser = PILParser()
        for chunk in response.iter_content(config.CHUNK_SIZE):
            length += len(chunk)
            if length > config.IMAGE_MAX_BYTESIZE:
                del parser
                raise cls.MaxBytesException
            parser.feed(chunk)
            # comment this to get the whole file
            if parser.image and parser.image.size:
                # Header parsed: size/format known, stop downloading.
                raw_image = parser.image
                del parser  # free some memory
                break
            # or this to get just the size and format
            # raw_image = parser.close()
    if length == 0:
        raise cls.ZeroBytesException
    if not raw_image:
        raise cls.NoImageException
    image = Image(response_url, raw_image.size, raw_image.format)
    return image
python
{ "resource": "" }
q45748
FormatImageFilter.check_animated
train
def check_animated(cls, raw_image):
    """Checks whether the gif is animated.

    Seeking past frame 0 only succeeds on multi-frame images; plain
    single-frame images raise EOFError and pass the check.

    :param raw_image: a PIL image supporting ``seek``.
    :raises cls.AnimatedImageException: when the image is animated.
    """
    try:
        raw_image.seek(1)
    except EOFError:
        # Single frame: not animated, nothing to do.
        return
    # seek(1) succeeded, so there is a second frame.
    raise cls.AnimatedImageException
python
{ "resource": "" }
q45749
valid_options
train
def valid_options(kwargs, allowed_options):
    """Checks that kwargs are valid API options.

    Prints the unknown option names (if any) and returns whether all
    given options are allowed.
    """
    unknown = set(kwargs) - set(allowed_options)
    if not unknown:
        return True
    print("Invalid option(s): ", ', '.join(unknown))
    return False
python
{ "resource": "" }
q45750
Course.create
train
def create(self, fullname, shortname, category_id, **kwargs):
    """
    Create a new course

    :param string fullname: The course's fullname
    :param string shortname: The course's shortname
    :param int category_id: The course's category

    :keyword string idnumber: (optional) Course ID number. \
        Yes, it's a string, blame Moodle.
    :keyword int summaryformat: (optional) Defaults to 1 (HTML). \
        Summary format options: (1 = HTML, 0 = Moodle, 2 = Plain, \
        or 4 = Markdown)
    :keyword string format: (optional) Defaults to "topics"
        Topic options: (weeks, topics, social, site)
    :keyword bool showgrades: (optional) Defaults to True. \
        Determines if grades are shown
    :keyword int newsitems: (optional) Defaults to 5. \
        Number of recent items appearing on the course page
    :keyword bool startdate: (optional) Timestamp when the course start
    :keyword int maxbytes: (optional) Defaults to 83886080. \
        Largest size of file that can be uploaded into the course
    :keyword bool showreports: Default to True. Are activity report shown?
    :keyword bool visible: (optional) Determines if course is \
        visible to students
    :keyword int groupmode: (optional) Defaults to 2.
        options: (0 = no group, 1 = separate, 2 = visible)
    :keyword bool groupmodeforce: (optional) Defaults to False. \
        Force group mode
    :keyword int defaultgroupingid: (optional) Defaults to 0. \
        Default grouping id
    :keyword bool enablecompletion: (optional) Enable control via \
        completion in activity settings.
    :keyword bool completionstartonenrol: (optional) \
        Begin tracking a student's progress in course completion after
    :keyword bool completionnotify: (optional) Default? Dunno. \
        Presumably notifies course completion
    :keyword string lang: (optional) Force course language.
    :keyword string forcetheme: (optional) Name of the force theme

    Example Usage::

    >>> import muddle
    >>> muddle.course().create('a new course', 'new-course', 20)
    """
    # Bug fix: 'jdefaultgroupingid' (typo) silently rejected the
    # documented 'defaultgroupingid' keyword.
    allowed_options = ['idnumber', 'summaryformat',
                       'format', 'showgrades',
                       'newsitems', 'startdate',
                       'maxbytes', 'showreports',
                       'visible', 'groupmode',
                       'groupmodeforce', 'defaultgroupingid',
                       'enablecompletion',
                       'completionstartonenrol',
                       'completionnotify', 'lang',
                       'forcetheme']

    if valid_options(kwargs, allowed_options):
        option_params = {}
        for key, val in kwargs.items():
            # Moodle expects booleans as 0/1.
            if isinstance(val, bool):
                val = int(val)
            option_params['courses[0][' + key + ']'] = val

        params = {'wsfunction': 'core_course_create_courses',
                  'courses[0][fullname]': fullname,
                  'courses[0][shortname]': shortname,
                  'courses[0][categoryid]': category_id}
        params.update(option_params)
        params.update(self.request_params)
        # NOTE(review): verify=False disables TLS certificate checking.
        return requests.post(self.api_url, params=params, verify=False)
python
{ "resource": "" }
q45751
Course.delete
train
def delete(self):
    """
    Deletes a specified courses

    Example Usage::

    >>> import muddle
    >>> muddle.course(10).delete()
    """
    payload = {'wsfunction': 'core_course_delete_courses',
               'courseids[0]': self.course_id}
    payload.update(self.request_params)
    return requests.post(self.api_url, params=payload, verify=False)
python
{ "resource": "" }
q45752
Course.contents
train
def contents(self):
    """
    Returns entire contents of course page

    :returns: decoded JSON response object

    Example Usage::

    >>> import muddle
    >>> muddle.course(10).contents()
    """
    # Bug fix: the previous code did `params = self.request_params` and
    # then updated it, mutating the shared request_params dict so
    # 'wsfunction'/'courseid' leaked into every subsequent API call.
    # Copy it instead, as the other endpoints on this class do.
    params = dict(self.request_params)
    params.update({'wsfunction': 'core_course_get_contents',
                   'courseid': self.course_id})
    return requests.get(self.api_url, params=params, verify=False).json()
python
{ "resource": "" }
q45753
Course.export_data
train
def export_data(self, export_to, delete_content=False):
    """
    Export course data to another course. Does not include any user data.

    :param export_to: id of the destination course.
    :param bool delete_content: (optional) Delete content \
        from source course.

    Example Usage::

    >>> import muddle
    >>> muddle.course(10).export_data(12)
    """
    payload = {'wsfunction': 'core_course_import_course',
               'importfrom': self.course_id,
               'importto': export_to,
               'deletecontent': int(delete_content)}
    payload.update(self.request_params)
    return requests.post(self.api_url, params=payload, verify=False)
python
{ "resource": "" }
q45754
Category.details
train
def details(self):
    """
    Returns details for given category

    :returns: category response object

    Example Usage::

    >>> import muddle
    >>> muddle.category(10).details()
    """
    payload = {'wsfunction': 'core_course_get_categories',
               'criteria[0][key]': 'id',
               'criteria[0][value]': self.category_id}
    payload.update(self.request_params)
    return requests.post(self.api_url, params=payload, verify=False)
python
{ "resource": "" }
q45755
check_path
train
def check_path(path):
    """Check that a path is legal.

    :param path: the path to validate (bytes on Python 3, str on
        Python 2); must be non-empty and relative.
    :return: the path if all is OK
    :raise ValueError: if the path is illegal
    :raise TypeError: if the path has the wrong type
    """
    if path is None:
        raise ValueError("illegal path '%s'" % path)
    # Type check before the content checks: calling bytes methods on a
    # str below would otherwise raise a confusing TypeError of its own.
    # Bug fix: the two version clauses were joined with `and`, which can
    # never be true (the interpreter can't be both 2.x and >= 3.x), so
    # the type check was dead code.
    if ((sys.version_info[0] >= 3 and not isinstance(path, bytes)) or
            (sys.version_info[0] == 2 and not isinstance(path, str))):
        raise TypeError("illegal type for path '%r'" % path)
    if path == b'' or path.startswith(b'/'):
        raise ValueError("illegal path '%s'" % path)
    return path
python
{ "resource": "" }
q45756
format_path
train
def format_path(p, quote_spaces=False):
    """Format a path in utf8, quoting it if necessary.

    :param p: the path as bytes.
    :param quote_spaces: also quote paths that contain spaces.
    :return: the (possibly escaped and quoted) path as bytes.
    """
    if b'\n' in p:
        p = re.sub(b'\n', b'\\n', p)
        quote = True
    else:
        # Bug fix: on Python 3 indexing bytes yields an int, so the
        # original `p[0] == b'"'` was always False; startswith() works
        # on both major versions (and tolerates an empty path).
        quote = p.startswith(b'"') or (quote_spaces and b' ' in p)
    if quote:
        extra = GIT_FAST_IMPORT_NEEDS_EXTRA_SPACE_AFTER_QUOTE and b' ' or b''
        p = b'"' + p + b'"' + extra
    return p
python
{ "resource": "" }
q45757
format_who_when
train
def format_who_when(fields):
    """Format a tuple of name,email,secs-since-epoch,utc-offset-secs as a string.

    :param fields: 4-tuple of (name, email, timestamp, utc_offset_seconds).
    :return: bytes in fast-import identity format,
        e.g. ``name <email> 1234567890 +0100``.
    """
    offset = fields[3]
    if offset < 0:
        offset_sign = b'-'
        offset = abs(offset)
    else:
        offset_sign = b'+'
    # Split the absolute offset (seconds) into HHMM.
    offset_hours = offset // 3600
    offset_minutes = offset // 60 - offset_hours * 60
    offset_str = offset_sign + ('%02d%02d' % (offset_hours, offset_minutes)).encode('ascii')
    name = fields[0]
    # An empty name means the identity is just "<email> ..." without a
    # leading space.
    if name == b'':
        sep = b''
    else:
        sep = b' '
    name = utf8_bytes_string(name)
    email = fields[1]
    email = utf8_bytes_string(email)
    return b''.join((name, sep, b'<', email, b'> ', ("%d" % fields[2]).encode('ascii'), b' ', offset_str))
python
{ "resource": "" }
q45758
ImportCommand.dump_str
train
def dump_str(self, names=None, child_lists=None, verbose=False):
    """Dump fields as a string. For debugging.

    :param names: the list of fields to include or
        None for all public fields
    :param child_lists: dictionary of child command names to
        fields for that child command to include
    :param verbose: if True, prefix each line with the command class and
        display fields as a dictionary; if False, dump just the field
        values with tabs between them
    """
    interesting = {}
    if names is None:
        # Bug fix: __dict__ keys are native strings, so filter with a
        # str prefix (the original b'_' raised TypeError on Python 3).
        fields = [
            k for k in list(self.__dict__.keys())
            if not k.startswith('_')
        ]
    else:
        fields = names
    for field in fields:
        value = self.__dict__.get(field)
        # Binary payloads are elided so dumps stay readable.
        if field in self._binary and value is not None:
            value = b'(...)'
        interesting[field] = value
    if verbose:
        return "%s: %s" % (self.__class__.__name__, interesting)
    else:
        return "\t".join([repr(interesting[k]) for k in fields])
python
{ "resource": "" }
q45759
CommitCommand.iter_files
train
def iter_files(self):
    """Iterate over files.

    ``file_iter`` may be stored either as a callable producing an
    iterable or as an iterable itself; both forms yield the same items.
    """
    source = self.file_iter
    if callable(source):
        return source()
    return iter(source)
python
{ "resource": "" }
q45760
_open_repo
train
def _open_repo(args, path_key='<path>'):
    """Open and return the repository containing the specified file.

    The file is specified by looking up `path_key` in `args`. This value
    or `None` is passed to `open_repository`.

    Returns: A `Repository` instance.

    Raises:
        ExitError: If there is a problem opening the repo.
    """
    raw_path = args[path_key]
    path = pathlib.Path(raw_path) if raw_path else None
    try:
        return open_repository(path)
    except ValueError as exc:
        raise ExitError(ExitCode.DATA_ERR, str(exc))
python
{ "resource": "" }
q45761
_get_anchor
train
def _get_anchor(repo, id_prefix):
    """Get an anchor by ID, or a prefix of its id.

    Exits with DATA_ERR when the prefix matches no anchor or more than
    one anchor.
    """
    matches = [
        (anchor_id, anchor)
        for anchor_id, anchor in repo.items()
        if anchor_id.startswith(id_prefix)
    ]
    if len(matches) > 1:
        raise ExitError(
            ExitCode.DATA_ERR,
            'Ambiguous ID specification')
    if not matches:
        raise ExitError(
            ExitCode.DATA_ERR,
            'No anchor matching ID specification')
    return matches[0]
python
{ "resource": "" }
q45762
_launch_editor
train
def _launch_editor(starting_text=''): "Launch editor, let user write text, then return that text." # TODO: What is a reasonable default for windows? Does this approach even # make sense on windows? editor = os.environ.get('EDITOR', 'vim') with tempfile.TemporaryDirectory() as dirname: filename = pathlib.Path(dirname) / 'metadata.yml' with filename.open(mode='wt') as handle: handle.write(starting_text) subprocess.call([editor, filename]) with filename.open(mode='rt') as handle: text = handle.read() return text
python
{ "resource": "" }
q45763
gdate_to_jdn
train
def gdate_to_jdn(date):
    """
    Compute Julian day number from a Gregorian day, month and year.

    Algorithm from wikipedia's julian_day article: the calendar year is
    shifted to start in March so the leap day falls at the end of the
    counting year.

    Return: The julian day number
    """
    march_shift = (14 - date.month) // 12
    years = date.year + 4800 - march_shift
    months = date.month + 12 * march_shift - 3
    leap_days = years // 4 - years // 100 + years // 400
    return (date.day
            + (153 * months + 2) // 5
            + 365 * years
            + leap_days
            - 32045)
python
{ "resource": "" }
q45764
hdate_to_jdn
train
def hdate_to_jdn(date):
    """
    Compute Julian day from Hebrew day, month and year.

    Months 13 and 14 encode Adar I and Adar II; both are folded onto
    month 6, with Adar II shifted by 30 days.

    Return: julian day number,
            1 of tishrey julians,
            1 of tishrey julians next year
    """
    day = date.day
    month = date.month
    if date.month == 13:
        month = 6
    if date.month == 14:
        month = 6
        day += 30

    # Calculate days since 1,1,3744 (alternating 30/29-day months).
    day = _days_from_3744(date.year) + (59 * (month - 1) + 1) // 2 + day

    # length of year
    length_of_year = get_size_of_hebrew_year(date.year)

    # Special cases for this year
    if length_of_year % 10 > 4 and month > 2:  # long Heshvan
        day += 1
    if length_of_year % 10 < 4 and month > 3:  # short Kislev
        day -= 1
    if length_of_year > 365 and month > 6:  # leap year
        day += 30

    # adjust to julian (offset between 1 Tishrey 3744 and JDN epoch)
    return day + 1715118
python
{ "resource": "" }
q45765
jdn_to_gdate
train
def jdn_to_gdate(jdn):
    """
    Convert from the Julian day to the Gregorian day.

    Algorithm from 'Julian and Gregorian Day Numbers' by Peter Meyer.
    The intermediate quantities carry no obvious meaning; the integer
    arithmetic is a verbatim transcription of the published algorithm.

    Return: a ``datetime.date``
    """
    t = jdn + 68569
    quad = (4 * t) // 146097
    t = t - (146097 * quad + 3) // 4
    cent = (4000 * (t + 1)) // 1461001  # that's 1,461,001
    t = t - (1461 * cent) // 4 + 31
    idx = (80 * t) // 2447
    day = t - (2447 * idx) // 80
    t = idx // 11
    month = idx + 2 - (12 * t)
    year = 100 * (quad - 49) + cent + t
    return datetime.date(year, month, day)
python
{ "resource": "" }
q45766
jdn_to_hdate
train
def jdn_to_hdate(jdn): """Convert from the Julian day to the Hebrew day.""" # calculate Gregorian date date = jdn_to_gdate(jdn) # Guess Hebrew year is Gregorian year + 3760 year = date.year + 3760 jdn_tishrey1 = hdate_to_jdn(HebrewDate(year, 1, 1)) jdn_tishrey1_next_year = hdate_to_jdn(HebrewDate(year + 1, 1, 1)) # Check if computed year was underestimated if jdn_tishrey1_next_year <= jdn: year = year + 1 jdn_tishrey1 = jdn_tishrey1_next_year jdn_tishrey1_next_year = hdate_to_jdn(HebrewDate(year + 1, 1, 1)) size_of_year = get_size_of_hebrew_year(year) # days into this year, first month 0..29 days = jdn - jdn_tishrey1 # last 8 months always have 236 days if days >= (size_of_year - 236): # in last 8 months days = days - (size_of_year - 236) month = days * 2 // 59 day = days - (month * 59 + 1) // 2 + 1 month = month + 4 + 1 # if leap if size_of_year > 355 and month <= 6: month = month + 8 else: # in 4-5 first months # Special cases for this year if size_of_year % 10 > 4 and days == 59: # long Heshvan (day 30) month = 1 day = 30 elif size_of_year % 10 > 4 and days > 59: # long Heshvan month = (days - 1) * 2 // 59 day = days - (month * 59 + 1) // 2 elif size_of_year % 10 < 4 and days > 87: # short kislev month = (days + 1) * 2 // 59 day = days - (month * 59 + 1) // 2 + 2 else: # regular months month = days * 2 // 59 day = days - (month * 59 + 1) // 2 + 1 month = month + 1 return HebrewDate(year, month, day)
python
{ "resource": "" }
q45767
update
train
def update(anchor, handle=None):
    """Update an anchor based on the current contents of its source file.

    Args:
        anchor: The `Anchor` to be updated.
        handle: File-like object containing contents of the anchor's file.
            If `None`, then this function will open the file and read it.

    Returns:
        A new `Anchor`, possibly identical to the input.

    Raises:
        ValueError: No alignments could be found between old anchor and new
            text.
        AlignmentError: If no anchor could be created. The message of the
            exception will say what the problem is.
    """
    if handle is None:
        with anchor.file_path.open(mode='rt') as fp:
            source_text = fp.read()
    else:
        source_text = handle.read()
        # Rewind so make_anchor() below can re-read the same handle.
        handle.seek(0)

    ctxt = anchor.context

    # Align the anchor's stored full text against the current file text.
    a_score, alignments = align(ctxt.full_text, source_text, score, gap_penalty)
    # max_score = len(ctxt.full_text) * 3
    try:
        alignment = next(alignments)
    except StopIteration:
        raise AlignmentError('No alignments for anchor: {}'.format(anchor))

    # The stored context starts `len(ctxt.before)` characters before the
    # anchor's offset; translate alignment indices accordingly.
    anchor_offset = ctxt.offset - len(ctxt.before)
    # Keep only index pairs where both sides matched and the old index
    # falls inside the anchor's topic span.
    source_indices = tuple(
        s_idx
        for (a_idx, s_idx) in alignment
        if a_idx is not None
        if s_idx is not None
        if _index_in_topic(a_idx + anchor_offset, anchor))

    if not source_indices:
        raise AlignmentError(
            "Best alignment does not map topic to updated source.")

    return make_anchor(
        file_path=anchor.file_path,
        offset=source_indices[0],
        width=len(source_indices),
        context_width=anchor.context.width,
        metadata=anchor.metadata,
        handle=handle)
python
{ "resource": "" }
q45768
get
train
def get(context, tags: List[str], version: int, verbose: bool, bundle: str):
    """Get files.

    Looks up files in the store filtered by bundle, tags and version and
    prints one line per match. With ``verbose`` the line also carries
    the file id and its tag names.
    """
    store = Store(context.obj['database'], context.obj['root'])
    files = store.files(bundle=bundle, tags=tags, version=version)
    for file_obj in files:
        if verbose:
            tags = ', '.join(tag.name for tag in file_obj.tags)
            click.echo(f"{click.style(str(file_obj.id), fg='blue')} | {file_obj.full_path} | "
                       f"{click.style(tags, fg='yellow')}")
        else:
            click.echo(file_obj.full_path)
python
{ "resource": "" }
q45769
ProgressBar._get_callargs
train
def _get_callargs(self, *args, **kwargs):
    """
    Map the given positional and keyword arguments onto the parameters
    of `self.func` and return the resulting call-argument dictionary
    (with default values filled in).
    """
    return getcallargs(self.func, *args, **kwargs)
python
{ "resource": "" }
q45770
GitHubUser.export
train
def export(self):
    """Export all attributes of the user to a dict.

    :return: attributes of the user.
    :rtype: dict.
    """
    return {
        "name": self.name,
        "contributions": self.contributions,
        "avatar": self.avatar,
        "followers": self.followers,
        "join": self.join,
        "organizations": self.organizations,
        "repositories": self.numberOfRepos,
        "bio": self.bio,
        "private": self.private,
        "public": self.public,
        "location": self.location,
    }
python
{ "resource": "" }
q45771
GitHubUser.__getContributions
train
def __getContributions(self, web):
    """Scrap the contributions from a GitHub profile.

    Leaves ``self.contributions`` untouched when the profile page does
    not contain the expected element.

    :param web: parsed web.
    :type web: BeautifulSoup node.
    """
    contributions_raw = web.find_all('h2', {'class': 'f4 text-normal mb-2'})
    try:
        contrText = contributions_raw[0].text
        contrText = contrText.lstrip().split(" ")[0]
        contrText = contrText.replace(",", "")
        # Bug fix: this conversion lived *outside* the try block, so a
        # caught IndexError left contrText undefined and int(contrText)
        # then crashed with a NameError.
        self.contributions = int(contrText)
    except (IndexError, AttributeError) as error:
        print("There was an error with the user " + self.name)
        print(error)
python
{ "resource": "" }
q45772
GitHubUser.__getAvatar
train
def __getAvatar(self, web):
    """Scrap the avatar from a GitHub profile.

    :param web: parsed web.
    :type web: BeautifulSoup node.
    """
    try:
        # [:-10] drops the last 10 characters of the src URL —
        # presumably a fixed-size query suffix; TODO confirm.
        # NOTE(review): if find() returns None this raises TypeError,
        # which is not among the caught exceptions.
        self.avatar = web.find("img", {"class": "avatar"})['src'][:-10]
    except IndexError as error:
        print("There was an error with the user " + self.name)
        print(error)
    except AttributeError as error:
        print("There was an error with the user " + self.name)
        print(error)
python
{ "resource": "" }
q45773
GitHubUser.__getNumberOfRepositories
train
def __getNumberOfRepositories(self, web):
    """Scrap the number of repositories from a GitHub profile.

    Values like "1.2k" are expanded to 1200 (thousands plus hundreds).

    :param web: parsed web.
    :type web: BeautifulSoup node.
    """
    # NOTE(review): assumes the repositories counter is the first
    # 'Counter' span on the page — verify against the profile layout.
    counters = web.find_all('span', {'class': 'Counter'})
    try:
        if 'k' not in counters[0].text:
            self.numberOfRepos = int(counters[0].text)
        else:
            reposText = counters[0].text.replace(" ", "")
            reposText = reposText.replace("\n", "").replace("k", "")
            if reposText and len(reposText) > 1:
                # "X.Y" -> X*1000 + Y*100
                self.numberOfRepos = int(reposText.split(".")[0]) * \
                    1000 + int(reposText.split(".")[1]) * 100
            elif reposText:
                self.numberOfRepos = int(reposText.split(".")[0]) * 1000
    except IndexError as error:
        print("There was an error with the user " + self.name)
        print(error)
    except AttributeError as error:
        print("There was an error with the user " + self.name)
        print(error)
python
{ "resource": "" }
q45774
GitHubUser.__getNumberOfFollowers
train
def __getNumberOfFollowers(self, web):
    """Scrap the number of followers from a GitHub profile.

    Values like "1.2k" are expanded to 1200 (thousands plus hundreds).

    :param web: parsed web.
    :type web: BeautifulSoup node.
    """
    # NOTE(review): assumes the followers counter is the third 'Counter'
    # span on the page — verify against the profile layout.
    counters = web.find_all('span', {'class': 'Counter'})
    try:
        if 'k' not in counters[2].text:
            self.followers = int(counters[2].text)
        else:
            follText = counters[2].text.replace(" ", "")
            follText = follText.replace("\n", "").replace("k", "")
            if follText and len(follText) > 1:
                # "X.Y" -> X*1000 + Y*100
                self.followers = int(follText.split(".")[0])*1000 + \
                    int(follText.split(".")[1]) * 100
            elif follText:
                self.followers = int(follText.split(".")[0])*1000
    except IndexError as error:
        print("There was an error with the user " + self.name)
        print(error)
    except AttributeError as error:
        print("There was an error with the user " + self.name)
        print(error)
python
{ "resource": "" }
q45775
GitHubUser.__getLocation
train
def __getLocation(self, web):
    """Scrap the location from a GitHub profile.

    :param web: parsed web.
    :type web: BeautifulSoup node.
    """
    try:
        # find() returns None when the element is absent; the resulting
        # AttributeError on .text is caught below.
        self.location = web.find("span", {"class": "p-label"}).text
    except AttributeError as error:
        print("There was an error with the user " + self.name)
        print(error)
python
{ "resource": "" }
q45776
GitHubUser.__getJoin
train
def __getJoin(self, web):
    """Scrap the join date from a GitHub profile.

    :param web: parsed web.
    :type web: BeautifulSoup node.
    """
    join = web.findAll("a", {"class": "dropdown-item"})
    for j in join:
        try:
            if "Joined GitHub" in j.text:
                # The last 10 characters of the href hold the date —
                # presumably an ISO YYYY-MM-DD suffix; TODO confirm.
                self.join = j["href"][-10:]
        except IndexError as error:
            print("There was an error with the user " + self.name)
            print(error)
        except AttributeError as error:
            print("There was an error with the user " + self.name)
            print(error)
python
{ "resource": "" }
q45777
GitHubUser.__getBio
train
def __getBio(self, web):
    """Scrap the bio from a GitHub profile.

    Non-ASCII bios are replaced with an empty string; newlines, tabs,
    quotes and backslashes are stripped from the text.

    :param web: parsed web.
    :type web: BeautifulSoup node.
    """
    bio = web.find_all("div", {"class": "user-profile-bio"})
    if bio:
        try:
            bio = bio[0].text
            if bio and GitHubUser.isASCII(bio):
                # Sanitize characters that would break downstream
                # serialization of the bio text.
                bioText = bio.replace("\n", "")
                bioText = bioText.replace("\t", " ").replace("\"", "")
                bioText = bioText.replace("\'", "").replace("\\", "")
                self.bio = bioText
            else:
                self.bio = ""
        except IndexError as error:
            print("There was an error with the user " + self.name)
            print(error)
        except AttributeError as error:
            print("There was an error with the user " + self.name)
            print(error)
python
{ "resource": "" }
q45778
GitHubUser.__getOrganizations
train
def __getOrganizations(self, web):
    """Scrap the number of organizations from a GitHub profile.

    Counts the organization avatars shown on the profile page.

    :param web: parsed web.
    :type web: BeautifulSoup node.
    """
    avatars = web.find_all("a", {"class": "avatar-group-item"})
    self.organizations = len(avatars)
python
{ "resource": "" }
q45779
GitHubUser.getData
train
def getData(self):
    """Get data of the GitHub user.

    Downloads the profile page once, parses it and runs every scraper,
    each of which fills in one attribute of this instance.
    """
    url = self.server + self.name
    data = GitHubUser.__getDataFromURL(url)
    web = BeautifulSoup(data, "lxml")
    self.__getContributions(web)
    self.__getLocation(web)
    self.__getAvatar(web)
    self.__getNumberOfRepositories(web)
    self.__getNumberOfFollowers(web)
    self.__getBio(web)
    self.__getJoin(web)
    self.__getOrganizations(web)
python
{ "resource": "" }
q45780
GitHubUser.__getDataFromURL
train
def __getDataFromURL(url):
    """Read HTML data from an user GitHub profile.

    Retries until an HTTP 200 is received; a 404 aborts immediately.

    :param url: URL of the webpage to download.
    :type url: str.
    :return: webpage donwloaded.
    :rtype: str.
    :raises Exception: when the profile returns 404 (user not found).
    """
    code = 0
    # NOTE(review): any persistent non-200/non-404 condition makes this
    # loop retry forever — consider a retry limit.
    while code != 200:
        req = Request(url)
        try:
            response = urlopen(req)
            code = response.code
            # Brief pause to avoid hammering the server between retries.
            sleep(0.01)
        except HTTPError as error:
            code = error.code
            if code == 404:
                break
        except URLError as error:
            # Network hiccup: back off and retry.
            sleep(3)
    if code == 404:
        raise Exception("User was not found")
    return response.read().decode('utf-8')
python
{ "resource": "" }
q45781
Settings.clean
train
def clean(self, settings):
    """
    Filter given settings to keep only key names available in
    ``DEFAULT_SETTINGS``.

    Args:
        settings (dict): Loaded settings.

    Returns:
        dict: Settings object filtered.
    """
    accepted = {}
    for name, value in settings.items():
        # Unknown keys are silently dropped.
        if name in DEFAULT_SETTINGS:
            accepted[name] = value
    return accepted
python
{ "resource": "" }
q45782
Settings.set_settings
train
def set_settings(self, settings):
    """
    Set every given settings as object attributes.

    Args:
        settings (dict): Dictionnary of settings.
    """
    # Mirror every entry of the mapping onto this object.
    for name in settings:
        setattr(self, name, settings[name])
python
{ "resource": "" }
q45783
Settings.update
train
def update(self, settings):
    """
    Update object attributes from given settings

    Args:
        settings (dict): Dictionnary of elements to update settings.

    Returns:
        dict: Dictionnary of all current saved settings.
    """
    accepted = self.clean(settings)
    # Merge the filtered items into the internal store, then mirror
    # each of them as an attribute on this object.
    self._settings.update(accepted)
    self.set_settings(accepted)
    return self._settings
python
{ "resource": "" }
q45784
BasenwcParser._fetch_output_files
train
def _fetch_output_files(self, retrieved):
    """
    Checks the output folder for standard output and standard error
    files, returns their absolute paths on success.

    :param retrieved: A dictionary of retrieved nodes, as obtained from the
      parser.
    :return: tuple ``(output_path, error_path)``; either element is ``None``
      when the corresponding file was not retrieved.
    :raises IOError: if no retrieved folder is found.
    """
    import os

    # NOTE: a former (commented-out) guard that verified the calculation
    # was in the PARSING state was removed, together with the then-unused
    # imports of ``calc_states`` and ``InvalidOperation``.

    # Check that the retrieved folder is there
    try:
        out_folder = retrieved[self._calc._get_linkname_retrieved()]
    except KeyError:
        raise IOError("No retrieved folder found")

    list_of_files = out_folder.get_folder_list()

    output_path = None
    error_path = None

    if self._calc._DEFAULT_OUTPUT_FILE in list_of_files:
        output_path = os.path.join(out_folder.get_abs_path('.'),
                                   self._calc._DEFAULT_OUTPUT_FILE)
    if self._calc._DEFAULT_ERROR_FILE in list_of_files:
        error_path = os.path.join(out_folder.get_abs_path('.'),
                                  self._calc._DEFAULT_ERROR_FILE)

    return output_path, error_path
python
{ "resource": "" }
q45785
ContentExtractor._tidy
train
def _tidy(self, html, smart_tidy):
    """ Tidy HTML if we have a tidy method.

    This fixes problems with some sites which would otherwise trouble
    DOMDocument's HTML parsing. Although sometimes it makes the
    problem worse, which is why we can override it in site config files.
    """
    if not (self.config.tidy and tidylib and smart_tidy):
        # Tidying disabled (config, missing lib, or caller opt-out):
        # keep the HTML as-is.
        self.html = html
        return

    try:
        document, errors = tidylib.tidy_document(html, self.tidy_config)
    except UnicodeDecodeError:
        # For some reason, pytidylib fails to decode, whereas the
        # original html content converts perfectly manually.
        document, errors = tidylib.tidy_document(html.encode('utf-8'),
                                                 self.tidy_config)
        document = document.decode('utf-8')

    # if errors:
    #     LOGGER.debug(u'Ignored errors returned by tidylib: %s',
    #                  errors)

    self.tidied = True
    self.html = document
    LOGGER.info(u'Tidied document.')
python
{ "resource": "" }
q45786
ContentExtractor._parse_html
train
def _parse_html(self):
    """ Load the parser and parse `self.html`.

    Sets ``self.parser`` and ``self.parsed_tree``.

    :raises NotImplementedError: if the configured parser is not lxml.
    """

    if self.config.parser != 'lxml':
        raise NotImplementedError('%s parser not implemented'
                                  % self.config.parser)

    self.parser = etree.HTMLParser()

    try:
        self.parsed_tree = etree.parse(StringIO(self.html), self.parser)

    # Fix: `except X as e` instead of the removed-in-Python-3
    # `except X, e` syntax (works on Python 2.6+ too).
    except ValueError as e:
        if u'Unicode strings with encoding declaration are not supported' \
                in unicode(e):
            # For some reason, the HTML/XML declares another encoding
            # in its meta tags. TODO: we should probably remove this
            # meta tag, because the sparks detection mechanism usually
            # does a pretty good job at finding it.
            #
            # For now, this will fail for anything other than utf-8 and
            # make the program crash.
            self.parsed_tree = etree.parse(StringIO(
                self.html.encode('utf-8')), self.parser)
        else:
            # Fix: the original silently swallowed any other ValueError,
            # leaving ``parsed_tree`` unset and causing a confusing
            # failure later. Re-raise instead.
            raise
python
{ "resource": "" }
q45787
ContentExtractor._extract_next_page_link
train
def _extract_next_page_link(self):
    """ Try to get next page link. """

    # HEADS UP: we do not abort if next_page_link is already set:
    # we try to find next (eg. find 3 if already at page 2).

    for xpath_expression in self.config.next_page_link:
        matches = self.parsed_tree.xpath(xpath_expression)

        if not matches:
            continue

        if len(matches) != 1:
            # Ambiguous pattern: warn and try the next expression.
            LOGGER.warning(u'%s items for next-page link %s',
                           matches, xpath_expression,
                           extra={'siteconfig': self.config.host})
            continue

        match = matches[0]

        if 'href' in match.keys():
            self.next_page_link = match.get('href')
        else:
            self.next_page_link = match.text.strip()

        LOGGER.info(u'Found next page link: %s.', self.next_page_link)

        # First found link is the good one.
        break
python
{ "resource": "" }
q45788
ContentExtractor._extract_date
train
def _extract_date(self):
    """ Extract date from HTML.

    Scans the XPath expressions in ``self.config.date`` in order and
    stores the first non-empty match in ``self.date``. Does nothing if
    a date was already extracted.

    NOTE(review): ``self.date`` keeps the raw matched *string*; no
    datetime parsing happens here (cf. the commented ``strtotime``
    line below) — confirm callers expect a string.
    """

    if self.date:
        return

    # Flag used to break out of both nested loops on first success.
    found = False

    for pattern in self.config.date:
        items = self.parsed_tree.xpath(pattern)

        if isinstance(items, basestring):
            # In case xpath returns only one element.
            items = [items]

        for item in items:
            if isinstance(item, basestring):
                # '_ElementStringResult' object has no attribute 'text'
                stripped_date = unicode(item).strip()
            else:
                try:
                    stripped_date = item.text.strip()
                except AttributeError:
                    # .text is None. We got a <div> item with span-only
                    # content. The result will probably be completely
                    # useless to a python developer, but at least we
                    # didn't fail handling the siteconfig directive.
                    stripped_date = etree.tostring(item)

            if stripped_date:
                # self.date = strtotime(trim(elems, "; \t\n\r\0\x0B"))
                self.date = stripped_date
                LOGGER.info(u'Date extracted: %s.', stripped_date,
                            extra={'siteconfig': self.config.host})
                found = True
                break

        if found:
            break
python
{ "resource": "" }
q45789
ContentExtractor._extract_body
train
def _extract_body(self):
    """ Extract the body content from HTML.

    Tries each XPath expression in ``self.config.body`` in order.
    A single match is used directly (optionally pruned through
    readability's ``Document``); multiple matches are merged under a
    synthetic ``<root>`` element, skipping nodes that are descendants
    of already-collected nodes. Stops at the first expression that
    yields a body.
    """

    def is_descendant_node(parent, node):
        # Walk up the ancestor chain of `node` looking for `parent`.
        node = node.getparent()

        while node is not None:
            if node == parent:
                return True
            node = node.getparent()

        return False

    for pattern in self.config.body:
        items = self.parsed_tree.xpath(pattern)

        if len(items) == 1:
            if self.config.prune:
                # Clean with readability before storing.
                self.body = Document(etree.tostring(items[0])).summary()
            else:
                self.body = etree.tostring(items[0])

            # We've got a body now.
            break

        else:
            appended_something = False
            body = etree.Element("root")

            for item in items:
                # Detached nodes cannot be part of the document body.
                if item.getparent() is None:
                    continue

                is_descendant = False

                # Skip nodes already covered by a previously appended
                # ancestor, to avoid duplicating content.
                for parent in body:
                    if (is_descendant_node(parent, item)):
                        is_descendant = True
                        break

                if not is_descendant:
                    if self.config.prune:
                        # Clean with readability. Needs
                        # to-string conversion first.
                        pruned_string = Document(
                            etree.tostring(item)).summary()

                        # Re-parse the readability string
                        # output and include it in our body.
                        new_tree = etree.parse(
                            StringIO(pruned_string), self.parser)

                        failed = False

                        try:
                            body.append(
                                new_tree.xpath('//html/body/div/div')[0]
                            )
                        except IndexError:
                            # Readability sometimes wraps its output in
                            # a bare readabilityBody <body> instead.
                            if 'id="readabilityBody"' in pruned_string:
                                try:
                                    body.append(
                                        new_tree.xpath('//body')
                                    )
                                # NOTE(review): bare `except` swallows
                                # everything here — consider narrowing.
                                except:
                                    failed = True
                            else:
                                failed = True

                        if failed:
                            LOGGER.error(u'Pruning item failed:'
                                         u'\n\n%s\n\nWe got: “%s” '
                                         u'and skipped it.',
                                         etree.tostring(
                                             item).replace(u'\n', u''),
                                         pruned_string.replace(u'\n', u''),
                                         extra={'siteconfig': self.config.host})
                            pass

                    else:
                        body.append(item)

                    appended_something = True

            if appended_something:
                self.body = etree.tostring(body)

                # We've got a body now.
                break
python
{ "resource": "" }
q45790
ContentExtractor._auto_extract_if_failed
train
def _auto_extract_if_failed(self):
    """ Try to automatically extract as much as possible.

    Falls back to readability for the title and body when the site
    config patterns produced nothing, and records every field that
    could not be extracted in ``self.failures``.
    """

    if not self.config.autodetect_on_failure:
        return

    readable = Document(self.html)

    if self.title is None:
        if bool(self.config.title):
            # Site config had title patterns, but none matched.
            self.failures.add('title')

        auto_title = readable.title().strip()

        if auto_title:
            self.title = auto_title
            LOGGER.info(u'Title extracted in automatic mode.',
                        extra={'siteconfig': self.config.host})
        else:
            self.failures.add('title')

    if self.body is None:
        if bool(self.config.body):
            # Site config had body patterns, but none matched.
            self.failures.add('body')

        auto_body = readable.summary().strip()

        if auto_body:
            self.body = auto_body
            LOGGER.info(u'Body extracted in automatic mode.',
                        extra={'siteconfig': self.config.host})
        else:
            self.failures.add('body')

    # These fields have no automatic fallback; just record failures.
    for attr_name in ('date', 'language', 'author', ):
        if not bool(getattr(self, attr_name, None)):
            if bool(getattr(self.config, attr_name, None)):
                self.failures.add(attr_name)
                LOGGER.warning(u'Could not extract any %s from XPath '
                               u'expression(s) %s.', attr_name,
                               u', '.join(getattr(self.config, attr_name)),
                               extra={'siteconfig': self.config.host})
python
{ "resource": "" }
q45791
ContentExtractor.process
train
def process(self, html, url=None, smart_tidy=True): u""" Process HTML content or URL. For automatic extraction patterns and cleanups, :mod:`readability-lxml` is used, to stick as much as possible to the original PHP implementation and produce at least similar results with the same site config on the same article/content. :param html: an unicode string containing a full HTML page content. Expected to have a ``DOCTYPE`` and all other standard attributes ; eg. HTML fragments are not supported. It will be replaced, tidied, cleaned, striped, and all metadata and body attributes will be extracted from it. Beware : this HTML piece will be mauled. See source code for exact processing workflow, it's quite gorgeous. :type html: unicode :param url: as of version 0.5, this parameter is ignored. (**TODO**) :type url: str, unicode or ``None`` :param smart_tidy: When ``True`` (default), runs :mod:`pytidylib` to tidy the HTML, after after run ``find_string``/``replace_string`` replacements and before running extractions. :type smart_tidy: bool :returns: ``True`` on success, ``False`` on failure. :raises: - :class:`RuntimeError` if config has not been set at instantiation. This should change in the future by looking up a config if an ``url`` is passed as argument. .. note:: If tidy is used and no result is produced, we will try again without tidying. Generally speaking, tidy helps us deal with PHP's patchy HTML parsing (LOOOOOL. Zeriously?) most of the time but it has problems of its own which we try to avoid with this option. In the Python implementation, `pytidylib` has showed to help sanitize a lot the HTML before processing it. But nobody's perfect, and errors can happen in the Python world too, thus the *tidy* behavior was thought sane enough to be keep. """ # TODO: re-implement URL handling with self.reset() here. 
if self.config is None: raise RuntimeError(u'extractor site config is not set.') # TODO: If re-running ourselves over an already-replaced string, # this should just do nothing because everything has been # done. We should have a test for that. html = self._process_replacements(html) # We keep the html untouched after replacements. # All processing happens on self.html after this point. self._tidy(html, smart_tidy) # return self._parse_html() self._extract_next_page_link() self._extract_title() self._extract_author() self._extract_language() self._extract_date() self._strip_unwanted_elements() self._extract_body() # TODO: re-implement auto-detection here. # NOTE: hNews extractor was here. # NOTE: instapaper extractor was here. self._auto_extract_if_failed() if self.title is not None or self.body is not None \ or bool(self.author) or self.date is not None \ or self.language is not None: self.success = True # if we've had no success and we've used tidy, there's a chance # that tidy has messed up. So let's try again without tidy... if not self.success and self.tidied and smart_tidy: self.process(html, url=None, smart_tidy=False) return self.success
python
{ "resource": "" }
q45792
DatalakeRecord.list_from_url
train
def list_from_url(cls, url):
    '''return a list of DatalakeRecords for the specified url'''
    key = cls._get_key(url)
    metadata = cls._get_metadata_from_key(key)
    create_time = cls._get_create_time(key)
    # One record per time bucket the file's metadata spans.
    return [
        cls(url, metadata, bucket, create_time, key.size)
        for bucket in cls.get_time_buckets_from_metadata(metadata)
    ]
python
{ "resource": "" }
q45793
DatalakeRecord.list_from_metadata
train
def list_from_metadata(cls, url, metadata):
    '''return a list of DatalakeRecords for the url and metadata'''
    key = cls._get_key(url)
    # Validate/normalize the raw dict through the Metadata type.
    validated = Metadata(**metadata)
    create_time = cls._get_create_time(key)
    # One record per time bucket the metadata spans.
    return [
        cls(url, validated, bucket, create_time, key.size)
        for bucket in cls.get_time_buckets_from_metadata(validated)
    ]
python
{ "resource": "" }
q45794
DatalakeRecord.get_time_buckets_from_metadata
train
def get_time_buckets_from_metadata(metadata):
    '''return a list of time buckets in which the metadata falls'''
    start = metadata['start']
    # A missing/None end means a point-in-time file: use start.
    end = metadata.get('end') or start
    buckets = DatalakeRecord.get_time_buckets(start, end)
    if len(buckets) > DatalakeRecord.MAXIMUM_BUCKET_SPAN:
        raise UnsupportedTimeRange(
            'metadata spans too many time buckets: {}'.format(
                json.dumps(metadata)))
    return buckets
python
{ "resource": "" }
q45795
DatalakeRecord.get_time_buckets
train
def get_time_buckets(start, end):
    '''get the time buckets spanned by the start and end times'''
    bucket_ms = DatalakeRecord.TIME_BUCKET_SIZE_IN_MS
    first = int(start / bucket_ms)
    last = int(end / bucket_ms)
    # Inclusive of both the first and the last bucket.
    return list(range(first, last + 1))
python
{ "resource": "" }
q45796
write_pdb
train
def write_pdb(residues, chain_id=' ', alt_states=False, strip_states=False):
    """Writes a pdb file for a list of residues.

    Parameters
    ----------
    residues : list
        List of Residue objects.
    chain_id : str
        String of the chain id, defaults to ' '.
    alt_states : bool, optional
        If true, include all occupancy states of residues, else outputs
        primary state only.
    strip_states : bool, optional
        If true, remove all state labels from residues. Only use with
        alt_states false.

    Returns
    -------
    pdb_str : str
        String of the PDB file.
    """
    pdb_atom_col_dict = PDB_ATOM_COLUMNS
    out_pdb = []
    # PDB chain ids are single characters; longer ids fall back to ' '.
    if len(str(chain_id)) > 1:
        poly_id = ' '
    else:
        poly_id = str(chain_id)
    for monomer in residues:
        # With alt_states, emit every state's atoms in state order;
        # otherwise just the primary-state atoms.
        if (len(monomer.states) > 1) and alt_states and not strip_states:
            atom_list = itertools.chain(
                *[x[1].items() for x in sorted(monomer.states.items())])
        else:
            atom_list = monomer.atoms.items()
        # A per-residue chain_id tag overrides the function argument.
        if 'chain_id' in monomer.tags:
            poly_id = monomer.tags['chain_id']
        for atom_t, atom in atom_list:
            # Alternate-location indicator: blank when stripping, or for
            # the sole 'A' state; otherwise the state letter itself.
            if strip_states:
                state_label = ' '
            elif (atom.tags['state'] == 'A') and (len(monomer.states) == 1):
                state_label = ' '
            else:
                state_label = atom.tags['state']
            # Fixed-width fields per the PDB ATOM/HETATM column layout;
            # `cap` truncates values that would overflow their column.
            atom_data = {
                'atom_number': '{:>5}'.format(cap(atom.id, 5)),
                'atom_name': '{:<4}'.format(cap(pdb_atom_col_dict[atom_t], 4)),
                'alt_loc_ind': '{:<1}'.format(cap(state_label, 1)),
                'residue_type': '{:<3}'.format(cap(monomer.mol_code, 3)),
                'chain_id': '{:<1}'.format(cap(poly_id, 1)),
                'res_num': '{:>4}'.format(cap(monomer.id, 4)),
                'icode': '{:<1}'.format(cap(monomer.insertion_code, 1)),
                'coord_str': '{0:>8.3f}{1:>8.3f}{2:>8.3f}'.format(
                    *[x for x in atom]),
                'occupancy': '{:>6.2f}'.format(atom.tags['occupancy']),
                'temp_factor': '{:>6.2f}'.format(atom.tags['bfactor']),
                'element': '{:>2}'.format(cap(atom.element, 2)),
                'charge': '{:<2}'.format(cap(atom.tags['charge'], 2))
            }
            # The two templates differ only in the 6-character record
            # name (HETATM vs ATOM padded with spaces).
            if monomer.is_hetero:
                pdb_line_template = (
                    'HETATM{atom_number} {atom_name}{alt_loc_ind}{residue_type}'
                    ' {chain_id}{res_num}{icode} {coord_str}{occupancy}'
                    '{temp_factor} {element}{charge}\n'
                )
            else:
                pdb_line_template = (
                    'ATOM {atom_number} {atom_name}{alt_loc_ind}{residue_type}'
                    ' {chain_id}{res_num}{icode} {coord_str}{occupancy}'
                    '{temp_factor} {element}{charge}\n'
                )
            out_pdb.append(pdb_line_template.format(**atom_data))
    return ''.join(out_pdb)
python
{ "resource": "" }
q45797
BaseAmpal.centre_of_mass
train
def centre_of_mass(self):
    """Returns the centre of mass of AMPAL object.

    Notes
    -----
    All atoms are included in calculation, call `centre_of_mass`
    manually if another selection is require.

    Returns
    -------
    centre_of_mass : numpy.array
        3D coordinate for the centre of mass.
    """
    # Materialise the atoms once: the original called get_atoms()
    # three times, re-walking the whole structure each time -- and
    # some implementations (e.g. Monomer.get_atoms with alt states)
    # return a one-shot itertools.chain that must not be re-consumed.
    atoms = list(self.get_atoms())
    elts = set([x.element for x in atoms])
    masses_dict = {e: ELEMENT_DATA[e]['atomic mass'] for e in elts}
    points = [x._vector for x in atoms]
    masses = [masses_dict[x.element] for x in atoms]
    return centre_of_mass(points=points, masses=masses)
python
{ "resource": "" }
q45798
Monomer.get_atoms
train
def get_atoms(self, inc_alt_states=False):
    """Returns all atoms in the `Monomer`.

    Parameters
    ----------
    inc_alt_states : bool, optional
        If `True`, will return `Atoms` for alternate states.
    """
    if not inc_alt_states:
        return self.atoms.values()
    # Chain atoms from every state, walking the states in key order so
    # alternate conformations come back deterministically.
    per_state_atoms = [state_atoms.values()
                       for _, state_atoms in sorted(self.states.items())]
    return itertools.chain(*per_state_atoms)
python
{ "resource": "" }
q45799
Monomer.close_monomers
train
def close_monomers(self, group, cutoff=4.0):
    """Returns a list of Monomers from within a cut off distance of the Monomer

    Parameters
    ----------
    group: BaseAmpal or Subclass
        Group to be search for Monomers that are close to this Monomer.
    cutoff: float
        Distance cut off.

    Returns
    -------
    nearby_residues: [Monomers]
        List of Monomers within cut off distance.
    """
    neighbours = []
    for own_atom in self.atoms.values():
        for contact_atom in group.is_within(cutoff, own_atom):
            parent_residue = contact_atom.parent
            # Deduplicate while preserving discovery order.
            if parent_residue not in neighbours:
                neighbours.append(parent_residue)
    return neighbours
python
{ "resource": "" }