text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cursor(self):
    """The position of the cursor in the text.

    The value is clamped to ``[0, len(self)]`` on read; out-of-range
    values are written back before returning.
    """
    # NOTE(review): assigning to self.cursor presumably goes through a
    # property setter defined elsewhere -- confirm, otherwise this
    # getter would recurse.
    if self._cursor < 0:
        self.cursor = 0
    if self._cursor > len(self):
        self.cursor = len(self)
    return self._cursor
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete_one_letter(self, letter=RIGHT):
    """Remove a single character on one side of the cursor.

    ``letter`` selects the side: ``self.LEFT`` deletes the character
    before the cursor (backspace), ``self.RIGHT`` the one after it
    (delete).
    """
    assert letter in (self.RIGHT, self.LEFT)
    pos = self.cursor
    if letter == self.LEFT:
        # Backspace: drop the char before the cursor, then step back.
        self.text = self.text[:pos - 1] + self.text[pos:]
        self.cursor = pos - 1
    else:
        # Delete: drop the char after the cursor; cursor stays put.
        self.text = self.text[:pos] + self.text[pos + 1:]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete_one_word(self, word=RIGHT):
    """Delete the word on one side of the cursor.

    Word boundaries are plain spaces.  Deleting to the right removes up
    to and including the next space; deleting to the left removes back
    to the previous space and moves the cursor there.
    """
    assert word in (self.RIGHT, self.LEFT)
    if word == self.RIGHT:
        # find() yields -1 when absent, so +1 gives 0 -> falsy -> whole tail.
        cut = (self.text.find(' ', self.cursor) + 1) or len(self.text)
        self.text = self.text[:self.cursor] + self.text[cut:]
    else:
        # rfind() yields -1 when absent; clamp to the start of the text.
        cut = max(self.text.rfind(' ', 0, self.cursor), 0)
        self.text = self.text[:cut] + self.text[self.cursor:]
        self.cursor = cut
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_letter(self, letter):
    """Insert a single character at the cursor position and advance past it."""
    assert isinstance(letter, str)
    assert len(letter) == 1
    pos = self.cursor
    self.text = ''.join((self.text[:pos], letter, self.text[pos:]))
    self.cursor += 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self, event_or_list):
    """Update the text and cursor position according to the event(s) passed.

    :param event_or_list: a single pygame event or an iterable of them;
        normalised by the superclass ``update``.
    """
    event_or_list = super().update(event_or_list)
    for e in event_or_list:
        if e.type != KEYDOWN:
            continue
        if e.key == K_RIGHT:
            # BUGFIX: was ``e.mod * KMOD_CTRL`` (multiplication), truthy
            # for ANY non-zero modifier state; use a bitmask test like the
            # backspace/delete branches below.
            if e.mod & KMOD_CTRL:
                self.move_cursor_one_word(self.RIGHT)
            else:
                self.move_cursor_one_letter(self.RIGHT)
        elif e.key == K_LEFT:
            # BUGFIX: same ``*`` -> ``&`` fix as above.
            if e.mod & KMOD_CTRL:
                self.move_cursor_one_word(self.LEFT)
            else:
                self.move_cursor_one_letter(self.LEFT)
        elif e.key == K_BACKSPACE:
            if self.cursor == 0:
                continue  # nothing to the left of the cursor to delete
            if e.mod & KMOD_CTRL:
                self.delete_one_word(self.LEFT)
            else:
                self.delete_one_letter(self.LEFT)
        elif e.key == K_DELETE:
            if e.mod & KMOD_CTRL:
                self.delete_one_word(self.RIGHT)
            else:
                self.delete_one_letter(self.RIGHT)
        elif e.unicode != '' and e.unicode.isprintable():
            # Any other printable key inserts its character.
            self.add_letter(e.unicode)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def shawn_text(self):
    """The masked text displayed in place of the real one.

    Returns the cached mask when it already matches the text length;
    otherwise builds a new one: bullet characters for the ``DOTS`` style,
    or random visible glyphs for any other style.
    """
    if len(self._shawn_text) == len(self):
        return self._shawn_text
    if self.style == self.DOTS:
        return chr(0x2022) * len(self)
    # Accept only code points that fall inside ranges of visible glyphs.
    allowed = ((902, 1366), (192, 683), (33, 122))
    mask = ''
    while len(mask) < len(self.text):
        code = randint(33, 1366)
        if any(lo <= code <= hi for lo, hi in allowed):
            mask += chr(code)
    self._shawn_text = mask
    return mask
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cursor_pos(self):
    """The cursor position in pixels inside the widget."""
    if len(self) == 0:
        # Empty text: place the cursor right after the placeholder text.
        return self.left + self.default_text.get_width()
    papy = self._surface.get_width()
    if papy > self.w:
        # Rendered text wider than the widget: shift left so the end
        # of the text stays visible.
        # NOTE(review): compares against self.w but subtracts self.width;
        # presumably aliases of the same value -- confirm.
        shift = papy - self.width
    else:
        shift = 0
    return self.left + self.font.size(self.shawn_text[:self.cursor])[0] - shift
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def latex_to_img(tex):
    """Render a LaTeX snippet and return it as a pygame image.

    Writes ``tex`` to a temporary ``.tex`` file, compiles it with
    ``latex``, converts the DVI output to PNG via ``dvipng`` and loads
    the PNG with pygame.

    :param tex: complete LaTeX source as a string.
    :return: the rendered ``pygame`` image surface.
    """
    with tempfile.TemporaryDirectory() as tmpdirname:
        # CHANGED: build paths with os.path.join instead of hard-coded
        # backslashes so this also works on non-Windows platforms.
        tex_path = os.path.join(tmpdirname, 'tex.tex')
        dvi_path = os.path.join(tmpdirname, 'tex.dvi')
        png_path = os.path.join(tmpdirname, 'tex.png')
        with open(tex_path, 'w') as f:
            f.write(tex)
        # NOTE: -disable-installer/-aux-directory are MiKTeX-specific flags.
        os.system("latex {0} -halt-on-error -interaction=batchmode -disable-installer "
                  "-aux-directory={1} -output-directory={1}".format(tex_path, tmpdirname))
        os.system("dvipng -T tight -z 9 --truecolor -o {0} {1}".format(png_path, dvi_path))
        # os.system(r'latex2png ' + tmpdirname)
        image = pygame.image.load(png_path)
        return image
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def name2rgb(name):
    """Convert a color name (e.g. ``"red"``) into its ``(r, g, b)`` value."""
    try:
        import colour
    except ImportError:
        raise ImportError('You need colour to be installed: pip install colour')
    parsed = colour.Color(name)
    # Scale each 0..1 channel to the 0..255 integer range.
    return tuple(int(channel * 255)
                 for channel in (parsed.red, parsed.green, parsed.blue))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_page(page):
    """Parse a command man page into colorized output lines."""
    colors = get_config()['colors']
    with io.open(page, encoding='utf-8') as f:
        raw_lines = f.readlines()
    styled = []
    # The first line is the page title; it is always skipped.
    for line in raw_lines[1:]:
        if is_headline(line):
            continue
        if is_description(line):
            styled.append(click.style(line.replace('>', ' '),
                                      fg=colors['description']))
        elif is_old_usage(line):
            styled.append(click.style(line, fg=colors['usage']))
        elif is_code_example(line):
            # Indent bare backtick lines, strip list markers otherwise.
            line = ' ' + line if line.startswith('`') else line[2:]
            styled.append(click.style(line.replace('`', ''),
                                      fg=colors['command']))
        elif is_line_break(line):
            styled.append(click.style(line))
        else:
            styled.append(click.style('- ' + line, fg=colors['usage']))
    return styled
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def configure_logging(level=logging.DEBUG):
    """Configure the module logging engine and return a logger.

    :param level: the logging level (defaults to ``logging.DEBUG``).
    :return: a configured :class:`logging.Logger`.
    """
    fmt = '%(asctime)s - %(levelname)s - %(message)s'
    if level == logging.DEBUG:
        # For debugging purposes, log from everyone!
        logging.basicConfig(level=logging.DEBUG, format=fmt)
        # CHANGED: return a real Logger instead of the ``logging`` module
        # so both branches have a consistent return type.
        return logging.getLogger(__name__)
    logger = logging.getLogger(__name__)
    logger.setLevel(level)
    handler = logging.StreamHandler()
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter(fmt))
    logger.addHandler(handler)
    return logger
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_username_password_hostname(remote_url):
    """Split an ssh-style url into its components.

    :param remote_url: a string like ``[user[:password]@]hostname:path``.
    :return: a ``(username, password, hostname, remote_path)`` tuple;
        username and password are ``None`` when absent.
    """
    assert remote_url
    assert ':' in remote_url
    # Split on the LAST '@' so passwords containing '@' survive.
    user_part, sep, host_part = remote_url.rpartition('@')
    username = user_part if sep else None
    hostname, remote_path = host_part.split(':', 1)
    password = None
    if username and ':' in username:
        username, password = username.split(':', 1)
    assert hostname
    assert remote_path
    return username, password, hostname, remote_path
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_ssh_agent_keys(logger):
    """Ask the SSH agent for a list of keys, and return it.

    :param logger: logger used to report agent problems.
    :return: ``(agent, agent_keys)``; either may be ``None`` when the
        agent is unavailable or offers no keys.
    """
    agent, agent_keys = None, None
    try:
        agent = paramiko.agent.Agent()
        _agent_keys = agent.get_keys()
        if not _agent_keys:
            agent.close()
            logger.error(
                "SSH agent didn't provide any valid key. Trying to continue..."
            )
        else:
            agent_keys = tuple(k for k in _agent_keys)
    except paramiko.SSHException:
        # Broken agent: drop it and continue without agent keys.
        if agent:
            agent.close()
        agent = None
        logger.error("SSH agent speaks a non-compatible protocol. Ignoring it.")
    # CHANGED: plain return instead of ``finally: return`` -- returning
    # from a finally block silently swallows any unexpected exception
    # (pylint lost-exception W0150).
    return agent, agent_keys
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_parser():
    """Create and return the CLI argument parser."""
    parser = argparse.ArgumentParser(
        description='Sync a local and a remote folder through SFTP.'
    )
    # Positional arguments.
    parser.add_argument(
        "path", type=str, metavar="local-path",
        help="the path of the local folder",
    )
    parser.add_argument(
        "remote", type=str,
        metavar="user[:password]@hostname:remote-path",
        help="the ssh-url ([user[:password]@]hostname:remote-path) of the remote folder. "
             "The hostname can be specified as a ssh_config's hostname too. "
             "Every missing information will be gathered from there",
    )
    # Optional flags, declared in their original order so help output
    # stays identical.
    optionals = [
        (("-k", "--key"),
         dict(metavar="identity-path", action="append",
              help="private key identity path (defaults to ~/.ssh/id_rsa)")),
        (("-l", "--logging"),
         dict(choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'],
              default='ERROR', help="set logging level")),
        (("-p", "--port"),
         dict(default=22, type=int, help="SSH remote port (defaults to 22)")),
        (("-f", "--fix-symlinks"),
         dict(action="store_true", help="fix symbolic links on remote side")),
        (("-a", "--ssh-agent"),
         dict(action="store_true", help="enable ssh-agent support")),
        (("-c", "--ssh-config"),
         dict(metavar="ssh_config path", default="~/.ssh/config", type=str,
              help="path to the ssh-configuration file (default to ~/.ssh/config)")),
        (("-n", "--known-hosts"),
         dict(metavar="known_hosts path", default="~/.ssh/known_hosts", type=str,
              help="path to the openSSH known_hosts file")),
        (("-d", "--disable-known-hosts"),
         dict(action="store_true",
              help="disable known_hosts fingerprint checking (security warning!)")),
        (("-e", "--exclude-from"),
         dict(metavar="exclude-from-file-path", type=str,
              help="exclude files matching pattern in exclude-from-file-path")),
        (("-t", "--do-not-delete"),
         dict(action="store_true",
              help="do not delete remote files missing from local folder")),
        (("-o", "--allow-unknown"),
         dict(action="store_true", help="allow connection to unknown hosts")),
        (("-r", "--create-remote-directory"),
         dict(action="store_true",
              help="Create remote base directory if missing on remote")),
    ]
    for flags, kwargs in optionals:
        parser.add_argument(*flags, **kwargs)
    return parser
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main(args=None):
    """Entry point: parse the CLI arguments and run the SFTP sync.

    :param args: optional argument list (defaults to ``sys.argv[1:]``).
    """
    parser = create_parser()
    args = vars(parser.parse_args(args))
    # Translate the textual log level into a logging module constant.
    log_mapping = {
        'CRITICAL': logging.CRITICAL,
        'ERROR': logging.ERROR,
        'WARNING': logging.WARNING,
        'INFO': logging.INFO,
        'DEBUG': logging.DEBUG,
        'NOTSET': logging.NOTSET,
    }
    log_level = log_mapping[args['logging']]
    del(args['logging'])
    global logger
    logger = configure_logging(log_level)
    # CLI argument names -> SFTPClone constructor parameter names.
    args_mapping = {
        "path": "local_path",
        "remote": "remote_url",
        "ssh_config": "ssh_config_path",
        "exclude_from": "exclude_file",
        "known_hosts": "known_hosts_path",
        "do_not_delete": "delete",
        "key": "identity_files",
    }
    kwargs = {  # convert the argument names to class constructor parameters
        args_mapping[k]: v
        for k, v in args.items()
        if v and k in args_mapping
    }
    # Pass through any remaining truthy flags unchanged.
    kwargs.update({
        k: v
        for k, v in args.items()
        if v and k not in args_mapping
    })
    # Special case: disable known_hosts check
    if args['disable_known_hosts']:
        kwargs['known_hosts_path'] = None
        del(kwargs['disable_known_hosts'])
    # Toggle `do_not_delete` flag
    # (the CLI flag says "do not delete"; the constructor wants "delete").
    if "delete" in kwargs:
        kwargs["delete"] = not kwargs["delete"]
    # Manually set the default identity file.
    kwargs["identity_files"] = kwargs.get("identity_files", None) or ["~/.ssh/id_rsa"]
    sync = SFTPClone(
        **kwargs
    )
    sync.run()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _must_be_deleted(local_path, r_st):
    """Return True if the remote counterpart of local_path must be deleted.

    That is the case when the path no longer exists locally, or when the
    local and remote nodes have different file types.
    """
    if not os.path.lexists(local_path):
        return True
    # Same path exists on both sides: compare node types (file/dir/link).
    local_stat = os.lstat(local_path)
    return S_IFMT(r_st.st_mode) != S_IFMT(local_stat.st_mode)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def file_upload(self, local_path, remote_path, l_st):
    """Upload local_path to remote_path and set permission and mtime.

    :param local_path: source file on the local machine.
    :param remote_path: destination path on the SFTP server.
    :param l_st: local stat result used to mirror mode/times remotely.
    """
    self.sftp.put(local_path, remote_path)
    self._match_modes(remote_path, l_st)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remote_delete(self, remote_path, r_st):
    """Recursively remove the remote node at ``remote_path``.

    Directories are emptied depth-first and then removed; plain files
    (and links) are deleted directly.
    """
    if S_ISDIR(r_st.st_mode):
        # Remove every child before the directory itself.
        for entry in self.sftp.listdir_attr(remote_path):
            self.remote_delete(path_join(remote_path, entry.filename), entry)
        self.sftp.rmdir(remote_path)
        return
    try:
        self.sftp.remove(remote_path)
    except FileNotFoundError as e:
        # Already gone (e.g. removed concurrently); log and carry on.
        self.logger.error(
            "error while removing {}. trace: {}".format(remote_path, e)
        )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_for_deletion(self, relative_path=None):
    """Traverse the entire remote_path tree.

    Find files/directories that need to be deleted,
    not being present in the local folder.

    :param relative_path: subtree to inspect, relative to the shared
        root; ``None`` (or empty) starts from the root.
    """
    if not relative_path:
        relative_path = str()  # root of shared directory tree
    remote_path = path_join(self.remote_path, relative_path)
    local_path = path_join(self.local_path, relative_path)
    for remote_st in self.sftp.listdir_attr(remote_path):
        # lstat: do NOT follow symlinks, we need the link node itself.
        r_lstat = self.sftp.lstat(path_join(remote_path, remote_st.filename))
        inner_remote_path = path_join(remote_path, remote_st.filename)
        inner_local_path = path_join(local_path, remote_st.filename)
        # check if remote_st is a symlink
        # otherwise could delete file outside shared directory
        if S_ISLNK(r_lstat.st_mode):
            if self._must_be_deleted(inner_local_path, r_lstat):
                self.remote_delete(inner_remote_path, r_lstat)
            continue
        if self._must_be_deleted(inner_local_path, remote_st):
            self.remote_delete(inner_remote_path, remote_st)
        elif S_ISDIR(remote_st.st_mode):
            # Directory is kept: recurse into it.
            self.check_for_deletion(
                path_join(relative_path, remote_st.filename)
            )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_update_symlink(self, link_destination, remote_path):
    """Create a new link pointing to link_destination in remote_path position.

    :param link_destination: target the new symlink must point to.
    :param remote_path: remote location where the link is (re)created.
    """
    try:  # if there's anything, delete it
        self.sftp.remove(remote_path)
    except IOError:  # that's fine, nothing exists there!
        pass
    finally:  # and recreate the link
        # NOTE: being in ``finally``, the symlink is attempted even when
        # ``remove`` raised something other than IOError.
        try:
            self.sftp.symlink(link_destination, remote_path)
        except OSError as e:
            # Sometimes, if links are "too" different, symlink fails.
            # Sadly, nothing we can do about it.
            self.logger.error("error while symlinking {} to {}: {}".format(
                remote_path, link_destination, e))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self):
    """Run the sync.

    Confront the local and the remote directories and perform the
    needed changes.
    """
    # Check if remote path is present
    try:
        self.sftp.stat(self.remote_path)
    except FileNotFoundError as e:
        if self.create_remote_directory:
            self.sftp.mkdir(self.remote_path)
            self.logger.info(
                "Created missing remote dir: '" + self.remote_path + "'")
        else:
            self.logger.error(
                "Remote folder does not exists. "
                "Add '-r' to create it if missing.")
            sys.exit(1)
    try:
        if self.delete:
            # First check for items to be removed
            self.check_for_deletion()
        # Now scan local for items to upload/create
        self.check_for_upload_create()
    except FileNotFoundError:
        # If this happens, probably the remote folder doesn't exist.
        self.logger.error(
            "Error while opening remote folder. Are you sure it does exist?")
        sys.exit(1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_files(start_path):
    """Render a directory tree as text (``tree`` unix command replacement)."""
    chunks = [u'\n']
    for root, dirs, files in os.walk(start_path):
        # Depth = number of separators left after stripping the start path.
        depth = root.replace(start_path, '').count(os.sep)
        chunks.append(u'{}{}/\n'.format(' ' * 4 * depth,
                                        os.path.basename(root)))
        file_indent = ' ' * 4 * (depth + 1)
        chunks.extend(u'{}{}\n'.format(file_indent, name) for name in files)
    return u''.join(chunks)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def file_tree(start_path):
    """Build a nested dict that mirrors the folder structure of start_path.

    Files map to ``None``; directories map to nested dicts.
    Liberally adapted from
    http://code.activestate.com/recipes/577879-create-a-nested-dictionary-from-oswalk/
    """
    tree = {}
    root_dir = start_path.rstrip(os.sep)
    prefix_len = root_dir.rfind(os.sep) + 1
    for path, dirs, files in os.walk(root_dir):
        parts = path[prefix_len:].split(os.sep)
        # Walk down the dicts built so far to reach this path's parent.
        node = reduce(dict.get, parts[:-1], tree)
        node[parts[-1]] = dict.fromkeys(files)
    return tree
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def capture_sys_output():
    """Capture standard output and error.

    Generator (context-manager body): swaps ``sys.stdout``/``sys.stderr``
    for StringIO buffers, yields the two buffers, and always restores the
    real streams afterwards.
    """
    buf_out, buf_err = StringIO(), StringIO()
    saved_streams = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = buf_out, buf_err
        yield buf_out, buf_err
    finally:
        sys.stdout, sys.stderr = saved_streams
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def suppress_logging(log_level=logging.CRITICAL):
    """Suppress logging up to ``log_level`` (context-manager body).

    Re-enables all logging after the caller's block completes.
    """
    logging.disable(log_level)
    yield
    # NOTE: not wrapped in try/finally -- logging stays disabled if the
    # caller's block raises.
    logging.disable(logging.NOTSET)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def override_env_variables():
    """Temporarily override user-related environment variables with "test".

    Generator (context-manager body): on exit every variable is restored
    to its previous value, or removed again if it was not set before.
    """
    env_vars = ("LOGNAME", "USER", "LNAME", "USERNAME")
    old = [os.environ.get(v) for v in env_vars]
    for v in env_vars:
        os.environ[v] = "test"
    yield
    for name, value in zip(env_vars, old):
        if value is not None:
            # BUGFIX: compare against None instead of truthiness so an
            # originally-empty value is restored too.
            os.environ[name] = value
        else:
            # BUGFIX: variables that did not exist before were previously
            # left set to "test"; remove them to fully restore the state.
            os.environ.pop(name, None)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_config():
    """Load and validate the configuration from .tldrrc, returned as a dict."""
    config_dir = os.environ.get('TLDR_CONFIG_DIR') or path.expanduser('~')
    config_path = path.join(config_dir, '.tldrrc')
    if not path.exists(config_path):
        sys.exit("Can't find config file at: {0}. You may use `tldr init` "
                 "to init the config file.".format(config_path))
    with io.open(config_path, encoding='utf-8') as f:
        try:
            config = yaml.safe_load(f)
        except yaml.scanner.ScannerError:
            sys.exit("The config file is not a valid YAML file.")
    # Reject color names that click cannot render.
    supported_colors = {'black', 'red', 'green', 'yellow', 'blue',
                        'magenta', 'cyan', 'white'}
    configured_colors = set(config['colors'].values())
    if not configured_colors.issubset(supported_colors):
        sys.exit("Unsupported colors in config file: {0}.".format(
            ', '.join(configured_colors - supported_colors)))
    if not path.exists(config['repo_directory']):
        sys.exit("Can't find the tldr repo, check the `repo_directory` "
                 "setting in config file.")
    return config
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_man_page(command, platform):
    """Locate the man page for ``command`` and return its parsed lines."""
    return parse_page(find_page_location(command, platform))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_page_location(command, specified_platform):
    """Find the command man page in the pages directory.

    :param command: name of the command to look up.
    :param specified_platform: platform override; falls back to the
        configured default platform, then to 'common'.
    :return: filesystem path of the page file.
    """
    # CHANGED: read the config once instead of twice (each call hits the
    # filesystem and re-parses the YAML file).
    config = get_config()
    repo_directory = config['repo_directory']
    default_platform = config['platform']
    command_platform = (
        specified_platform if specified_platform else default_platform)
    with io.open(path.join(repo_directory, 'pages/index.json'),
                 encoding='utf-8') as f:
        index = json.load(f)
    command_list = [item['name'] for item in index['commands']]
    if command not in command_list:
        sys.exit(
            ("Sorry, we don't support command: {0} right now.\n"
             "You can file an issue or send a PR on github:\n"
             "  https://github.com/tldr-pages/tldr").format(command))
    supported_platforms = index['commands'][
        command_list.index(command)]['platform']
    if command_platform in supported_platforms:
        platform = command_platform
    elif 'common' in supported_platforms:
        platform = 'common'
    else:
        sys.exit(
            ("Sorry, command {0} is not supported on your platform.\n"
             "You can file an issue or send a PR on github:\n"
             "  https://github.com/tldr-pages/tldr").format(command))
    return path.join(repo_directory, 'pages', platform, command + '.md')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find(command, on):
    """Print the usage of ``command`` (optionally for platform ``on``)."""
    click.echo(''.join(parse_man_page(command, on)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update():
    """Update the local tldr pages repo to the latest upstream version."""
    repo_directory = get_config()['repo_directory']
    os.chdir(repo_directory)
    click.echo("Check for updates...")
    # Compare the local HEAD sha with the remote one to detect staleness.
    local = subprocess.check_output('git rev-parse master'.split()).strip()
    remote = subprocess.check_output(
        'git ls-remote https://github.com/tldr-pages/tldr/ HEAD'.split()
    ).split()[0]
    if local != remote:
        click.echo("Updating...")
        subprocess.check_call('git checkout master'.split())
        subprocess.check_call('git pull --rebase'.split())
        # Rebuild the command index so new pages become searchable.
        build_index()
        click.echo("Update to the latest and rebuild the index.")
    else:
        click.echo("No need for updates.")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init():
    """Interactively create the ~/.tldrrc config file."""
    default_config_path = path.join(
        (os.environ.get('TLDR_CONFIG_DIR') or path.expanduser('~')),
        '.tldrrc')
    if path.exists(default_config_path):
        click.echo("There is already a config file exists, "
                   "skip initializing it.")
    else:
        # Ask the user for the two required settings and validate them.
        repo_path = click.prompt("Input the tldr repo path(absolute path)")
        if not path.exists(repo_path):
            sys.exit("Repo path not exist, clone it first.")
        platform = click.prompt("Input your platform(linux, osx or sunos)")
        if platform not in ['linux', 'osx', 'sunos']:
            sys.exit("Platform should be in linux, osx or sunos.")
        # Default color scheme for the three output line categories.
        colors = {
            "description": "blue",
            "usage": "green",
            "command": "cyan"
        }
        config = {
            "repo_directory": repo_path,
            "colors": colors,
            "platform": platform
        }
        with open(default_config_path, 'w') as f:
            f.write(yaml.safe_dump(config, default_flow_style=False))
        click.echo("Initializing the config file at {0}".format(
            default_config_path))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def locate(command, on):
    """Print the filesystem location of the command's man page."""
    click.echo(find_page_location(command, on))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def map_to(self, attrname, tablename=None, selectable=None, schema=None, base=None, mapper_args=util.immutabledict()):
    """Configure a mapping to the given attrname.

    This is the "master" method that can be used to create any
    configuration.

    :param attrname: String attribute name which will be established as
        an attribute on this :class:`.SQLSoup` instance.
    :param tablename: String name of a :class:`.Table` to be reflected.
        If a :class:`.Table` is already available, use the ``selectable``
        argument.  Mutually exclusive with ``selectable``.
    :param selectable: a :class:`.Table`, :class:`.Join`, or
        :class:`.Select` object which will be mapped.  Mutually exclusive
        with ``tablename``.
    :param schema: String schema name to use if the ``tablename``
        argument is present.
    :param base: a Python class which will be used as the base for the
        mapped class.  If ``None``, the "base" argument specified by this
        :class:`.SQLSoup` instance's constructor will be used, which
        defaults to ``object``.
    :param mapper_args: Dictionary of arguments which will be passed
        directly to :func:`.orm.mapper`.
    """
    # Each attrname may only be mapped once per SQLSoup instance.
    if attrname in self._cache:
        raise SQLSoupError(
            "Attribute '%s' is already mapped to '%s'" % (
                attrname,
                class_mapper(self._cache[attrname]).mapped_table
            ))
    if tablename is not None:
        if not isinstance(tablename, basestring):
            raise ArgumentError("'tablename' argument must be a string."
            )
        if selectable is not None:
            raise ArgumentError("'tablename' and 'selectable' "
                                "arguments are mutually exclusive")
        # Reflect the table definition from the database.
        selectable = Table(tablename,
                           self._metadata,
                           autoload=True,
                           autoload_with=self.bind,
                           schema=schema or self.schema)
    elif schema:
        raise ArgumentError("'tablename' argument is required when "
                            "using 'schema'.")
    elif selectable is not None:
        if not isinstance(selectable, expression.FromClause):
            raise ArgumentError("'selectable' argument must be a "
                                "table, select, join, or other "
                                "selectable construct.")
    else:
        raise ArgumentError("'tablename' or 'selectable' argument is "
                            "required.")
    # A primary key (reflected, or passed via mapper_args) is mandatory
    # for the ORM to be able to identify rows.
    if not selectable.primary_key.columns and not \
            'primary_key' in mapper_args:
        if tablename:
            raise SQLSoupError(
                "table '%s' does not have a primary "
                "key defined" % tablename)
        else:
            raise SQLSoupError(
                "selectable '%s' does not have a primary "
                "key defined" % selectable)
    mapped_cls = _class_for_table(
        self.session,
        self.engine,
        selectable,
        base or self.base,
        mapper_args
    )
    self._cache[attrname] = mapped_cls
    return mapped_cls
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def map(self, selectable, base=None, **mapper_args):
    """Map a selectable directly, without caching.

    The class and its mapping are not cached and will be discarded once
    dereferenced (as of 0.6.6).

    :param selectable: an :func:`.expression.select` construct.
    :param base: a Python class which will be used as the base for the
        mapped class.  If ``None``, the "base" argument specified by this
        :class:`.SQLSoup` instance's constructor will be used, which
        defaults to ``object``.
    :param mapper_args: Dictionary of arguments which will be passed
        directly to :func:`.orm.mapper`.
    """
    base_cls = base or self.base
    return _class_for_table(
        self.session, self.engine, selectable, base_cls, mapper_args
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def with_labels(self, selectable, base=None, **mapper_args):
    """Map a selectable directly, wrapping the selectable in a subquery
    with labels.

    The class and its mapping are not cached and will be discarded once
    dereferenced (as of 0.6.6).

    :param selectable: an :func:`.expression.select` construct.
    :param base: a Python class which will be used as the base for the
        mapped class.  If ``None``, the "base" argument specified by this
        :class:`.SQLSoup` instance's constructor will be used, which
        defaults to ``object``.
    :param mapper_args: Dictionary of arguments which will be passed
        directly to :func:`.orm.mapper`.
    """
    # TODO give meaningful aliases
    # NOTE(review): relies on the private SQLAlchemy helper
    # ``expression._clause_element_as_expr`` -- verify on upgrades.
    return self.map(
        expression._clause_element_as_expr(selectable).
        select(use_labels=True).
        alias('foo'), base=base, **mapper_args)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def left(self, f, n=1):
    """Return the nearest n features strictly to the left of a Feature f.

    Overlapping features are not considered as to the left.

    :param f: a Feature object.
    :param n: the number of features to return.
    """
    intervals = self.intervals[f.chrom]
    if intervals == []:
        return []
    # Candidate window: anything that can start before f.start, bounded
    # by the longest feature length seen on this chromosome.
    iright = binsearch_left_start(intervals, f.start, 0, len(intervals)) + 1
    ileft = binsearch_left_start(
        intervals, f.start - self.max_len[f.chrom] - 1, 0, 0)
    results = sorted((distance(other, f), other)
                     for other in intervals[ileft:iright]
                     if other.end < f.start and distance(f, other) != 0)
    if len(results) == n:
        return [r[1] for r in results]
    # have to do some extra work here since intervals are sorted
    # by starts, and we dont know which end may be around...
    # in this case, we got some extras, just return as many as
    # needed once we see a gap in distances.
    for i in range(n, len(results)):
        if results[i - 1][0] != results[i][0]:
            return [r[1] for r in results[:i]]
    if ileft == 0:
        return [r[1] for r in results]
    # BUGFIX: was a bare ``1/0`` placeholder that crashed with an opaque
    # ZeroDivisionError.  Raise an explicit error for the unimplemented
    # "widen the search window leftwards and retry" case instead.
    raise NotImplementedError(
        "left(): fewer than n non-overlapping features found in the "
        "initial window; widening the window is not implemented")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def right(self, f, n=1):
    """Return the nearest n features strictly to the right of a Feature f.

    Overlapping features are not considered as to the right.

    :param f: a Feature object.
    :param n: the number of features to return.
    """
    intervals = self.intervals[f.chrom]
    ilen = len(intervals)
    # First candidate: first interval whose end lies past f.end.
    iright = binsearch_right_end(intervals, f.end, 0, ilen)
    results = []
    while iright < ilen:
        i = len(results)
        if i > n:
            # Collected more than n: stop once the trailing distances
            # stop being tied.
            # NOTE(review): this can return up to n+1 items when ties
            # persist -- confirm that is the intended tie behavior.
            if distance(f, results[i - 1]) != distance(f, results[i - 2]):
                return results[:i - 1]
        other = intervals[iright]
        iright += 1
        if distance(other, f) == 0: continue  # overlapping: skip
        results.append(other)
    return results
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def upstream(self, f, n=1):
    """Find n upstream features of the query Feature f.

    "Upstream" follows the strand of f: to the right for reverse-strand
    features (``strand == -1``), to the left otherwise.  Overlapping
    features are not considered.

    :param f: a Feature object.
    :param n: the number of features to return.
    """
    side = self.right if f.strand == -1 else self.left
    return side(f, n)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def downstream(self, f, n=1):
    """Find n downstream features of the query Feature f.

    "Downstream" follows the strand of f: to the left for reverse-strand
    features (``strand == -1``), to the right otherwise.  Overlapping
    features are not considered.

    :param f: a Feature object.
    :param n: the number of features to return.
    """
    side = self.left if f.strand == -1 else self.right
    return side(f, n)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sequence(db, chrom, start, end):
    """Return the sequence for a region using the UCSC DAS server.

    Note the start is 1-based.  Each feature will have its own
    ``.sequence`` method which sends the correct start and end to this
    function, e.g. returning ``'caacttag'``.

    :param db: UCSC genome build name (e.g. ``'hg18'``).
    :param chrom: chromosome name.
    :param start: 1-based start coordinate.
    :param end: end coordinate.
    """
    url = "http://genome.ucsc.edu/cgi-bin/das/%s" % db
    url += "/dna?segment=%s:%i,%i"
    # Fetch the DAS XML response and extract the raw sequence from it.
    xml = U.urlopen(url % (chrom, start, end)).read()
    return _seq_from_xml(xml)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_table(genome, table, table_name, connection_string, metadata):
""" alter the table to work between different dialects """ |
# Reflect the table from the live connection; extend_existing allows it
# to be re-declared below with the adjusted column types.
table = Table(table_name, genome._metadata, autoload=True,
              autoload_with=genome.bind, extend_existing=True)
#print "\t".join([c.name for c in table.columns])
# need to prefix the indexes with the table name to avoid collisions
for i, idx in enumerate(table.indexes):
    idx.name = table_name + "." + idx.name + "_ix" + str(i)
cols = []
for i, col in enumerate(table.columns):
    # convert mysql-specific types to varchar
    #print col.name, col.type, isinstance(col.type, ENUM)
    if isinstance(col.type, (LONGBLOB, ENUM)):
        # sqlite has neither LONGBLOB nor ENUM; degrade to VARCHAR
        if 'sqlite' in connection_string:
            col.type = VARCHAR()
        elif 'postgres' in connection_string:
            if isinstance(col.type, ENUM):
                #print dir(col)
                # recreate the ENUM as a native postgres enum type,
                # named after the column so types don't collide
                col.type = PG_ENUM(*col.type.enums, name=col.name,
                    create_type=True)
            else:
                col.type = VARCHAR()
    elif str(col.type) == "VARCHAR" \
            and ("mysql" in connection_string \
            or "postgres" in connection_string):
        # mysql/postgres need an explicit VARCHAR length;
        # 'description' is left unbounded since it can be long
        if col.type.length is None:
            col.type.length = 48 if col.name != "description" else None
    if not "mysql" in connection_string:
        # mysql SET types have no portable equivalent; approximate
        # with a short VARCHAR on other dialects
        if str(col.type).lower().startswith("set("):
            col.type = VARCHAR(15)
    cols.append(col)
# re-declare the table with the tweaked columns
table = Table(table_name, genome._metadata, *cols,
        autoload_replace=True, extend_existing=True)
return table
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mirror(self, tables, dest_url):
    """Mirror a set of `tables` to the database at `dest_url`.

    Returns a new Genome object.

    Parameters
    ----------
    tables : list
        an iterable of tables
    dest_url : str
        a dburl string, e.g. 'sqlite:///local.db'
    """
    # imported lazily so the module loads without the mirror machinery
    from mirror import mirror as _mirror
    return _mirror(self, tables, dest_url)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dataframe(self, table):
    """Create a pandas DataFrame from a table or query.

    Parameters
    ----------
    table : str or table or query
        a table (or table name) in this database, or a query
    """
    from pandas import DataFrame
    if isinstance(table, six.string_types):
        table = getattr(self, table)
    # grab one record to discover the column names
    try:
        first = table.first()
    except AttributeError:
        first = table[0]
    # a Query exposes .all(); otherwise assume an iterable of rows
    if hasattr(table, "all"):
        rows = table.all()
    else:
        rows = [tuple(r) for r in table]
    columns = [c.name for c in first._table.columns]
    return DataFrame.from_records(rows, columns=columns)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def david_go(refseq_list, annot=('SP_PIR_KEYWORDS', 'GOTERM_BP_FAT', 'GOTERM_CC_FAT', 'GOTERM_MF_FAT')):
    """Open a web-browser to the DAVID online enrichment tool.

    Parameters
    ----------
    refseq_list : list
        list of refseq names to check for enrichment
    annot : list
        iterable of DAVID annotations to check for enrichment
    """
    import webbrowser
    URL = "http://david.abcc.ncifcrf.gov/api.jsp?type=REFSEQ_MRNA&ids=%s&tool=term2term&annot="
    # de-duplicate the ids, then append the annotation list to &annot=
    ids = ",".join(set(refseq_list))
    webbrowser.open(URL % ids + ",".join(annot))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bin_query(self, table, chrom, start, end):
    """Perform an efficient spatial query using the bin column if available.

    The possible bins are calculated from the `start` and `end` sent to
    this function.

    Parameters
    ----------
    table : str or table
        table to query
    chrom : str
        chromosome for the query
    start : int
        0-based start postion
    end : int
        0-based end position
    """
    if isinstance(table, six.string_types):
        table = getattr(self, table)
    # `table` may be a mapped table or a Query over one
    try:
        tbl = table._table
    except AttributeError:
        tbl = table.column_descriptions[0]['type']._table
    query = table.filter(tbl.c.chrom == chrom)
    if hasattr(tbl.c, "bin"):
        candidate_bins = Genome.bins(start, end)
        # for very wide regions a huge IN clause is no longer a win
        if len(candidate_bins) < 100:
            query = query.filter(tbl.c.bin.in_(candidate_bins))
    # gene-prediction tables use txStart/txEnd, others chromStart/chromEnd
    if hasattr(tbl.c, "txStart"):
        return query.filter(tbl.c.txStart <= end).filter(tbl.c.txEnd >= start)
    return query.filter(tbl.c.chromStart <= end).filter(tbl.c.chromEnd >= start)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def upstream(self, table, chrom_or_feat, start=None, end=None, k=1):
    """Return k-nearest upstream features.

    Parameters
    ----------
    table : str or table
        table against which to query
    chrom_or_feat : str or feat
        either a chromosome, e.g. 'chr3' or a feature with
        .chrom, .start, .end attributes
    start : int
        if `chrom_or_feat` is a chrom, then this must be the integer start
    end : int
        if `chrom_or_feat` is a chrom, then this must be the integer end
    k : int
        number of upstream neighbors to return
    """
    hits = self.knearest(table, chrom_or_feat, start, end, k, "up")
    start = getattr(chrom_or_feat, "start", start)
    end = getattr(chrom_or_feat, "end", end)
    # on the reverse strand, upstream features lie to the right
    if getattr(chrom_or_feat, "strand", "+") == "-":
        return [h for h in hits if h.end > start]
    return [h for h in hits if h.start < end]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def knearest(self, table, chrom_or_feat, start=None, end=None, k=1, _direction=None):
    """Return k-nearest features.

    Parameters
    ----------
    table : str or table
        table against which to query
    chrom_or_feat : str or feat
        either a chromosome, e.g. 'chr3' or a feature with
        .chrom, .start, .end attributes
    start : int
        if `chrom_or_feat` is a chrom, then this must be the integer start
    end : int
        if `chrom_or_feat` is a chrom, then this must be the integer end
    k : int
        number of neighbors to return
    _direction : (None, "up", "down")
        internal (don't use this)

    Ties at the k-th distance are all returned, so more than `k`
    features may come back.
    """
    assert _direction in (None, "up", "down")
    # they sent in a feature
    if start is None:
        assert end is None
        chrom, start, end = chrom_or_feat.chrom, chrom_or_feat.start, chrom_or_feat.end
        # if the query is directional and the feature has a strand,
        # flip the requested direction for reverse-strand features.
        # BUGFIX: this previously mapped *both* directions to "up".
        if _direction in ("up", "down") and getattr(chrom_or_feat,
                                                    "strand", None) == "-":
            _direction = "up" if _direction == "down" else "down"
    else:
        chrom = chrom_or_feat
    # int() instead of py2-only long(); py2 ints auto-promote anyway
    qstart, qend = int(start), int(end)
    res = self.bin_query(table, chrom, qstart, qend)
    # widen the search window geometrically until we have k hits
    i, change = 1, 350
    try:
        while res.count() < k:
            if _direction in (None, "up"):
                if qstart == 0 and _direction == "up":
                    break
                qstart = max(0, qstart - change)
            if _direction in (None, "down"):
                qend += change
            i += 1
            change *= (i + 5)
            res = self.bin_query(table, chrom, qstart, qend)
    except BigException:
        return []

    def dist(f):
        # 0 for overlapping features, otherwise the gap to the query
        d = 0
        if start > f.end:
            d = start - f.end
        elif f.start > end:
            d = f.start - end
        return d

    # BUGFIX: sort on the distance only -- with ties, sorting bare
    # (dist, feature) tuples tries to order the feature objects, which
    # raises TypeError on python 3.
    dists = sorted([(dist(f), f) for f in res], key=lambda pair: pair[0])
    if len(dists) == 0:
        return []
    dists, res = zip(*dists)
    if len(res) == k:
        return res
    if k > len(res):  # had to break because of end of chrom
        if k == 0:
            return []
        k = len(res)
    ndist = dists[k - 1]
    # include all features that are the same distance as the kth closest
    # feature (accounts for ties).
    while k < len(res) and dists[k] == ndist:
        k = k + 1
    return res[:k]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def annotate(self, fname, tables, feature_strand=False, in_memory=False, header=None, out=sys.stdout, parallel=False):
    """Annotate a file with a number of tables.

    Parameters
    ----------
    fname : str or file
        file name or file-handle
    tables : list
        list of tables with which to annotate `fname`
    feature_strand : bool
        if True, up/downstream designations are based on the features
        in `tables` rather than the features in `fname`
    in_memory : bool
        if True, tables are read into memory.  This usually makes the
        annotation much faster when `fname` has more than 500 features
        and the table has fewer than 100K.
    header : str
        header to print out (if True, use existing header)
    out : file
        where to print output
    parallel : bool
        if True, use the multiprocessing library to annotate each
        chromosome in parallel.  Uses more memory.
    """
    # delegate to the annotate module; this method is just a convenience
    from .annotate import annotate as _annotate
    return _annotate(self, fname, tables, feature_strand, in_memory,
                     header=header, out=out, parallel=parallel)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bins(start, end):
    """Get all the bin numbers for a particular interval defined by
    (start, end].

    Uses the standard UCSC binning scheme (128kb finest level).  Raises
    BigException for intervals too large for that scheme (>= 2^29).
    """
    # BUGFIX: the extended offsets [4681, 585, 73, 9, 1] were assigned
    # unconditionally after the size check, silently replacing the
    # standard offsets and producing bins that do not match UCSC's
    # standard bin column.  Only the standard scheme is supported here.
    if end - start >= 536870912:
        raise BigException
    offsets = [585, 73, 9, 1]
    binFirstShift = 17
    binNextShift = 3
    # collapse positions to the finest (128kb) bin level
    start = start >> binFirstShift
    end = (end - 1) >> binFirstShift
    bins = [1]
    for offset in offsets:
        bins.extend(range(offset + start, offset + end + 1))
        # move up one bin level (8x coarser)
        start >>= binNextShift
        end >>= binNextShift
    return frozenset(bins)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _find_filepath_in_roots(filename):
    """Look for `filename` in all MEDIA_ROOTS and return the first one
    found as a (filepath, root) tuple, or (None, None)."""
    for root in settings.DJANGO_STATIC_MEDIA_ROOTS:
        candidate = _filename2filepath(filename, root)
        if os.path.isfile(candidate):
            return candidate, root
    # haven't found it in DJANGO_STATIC_MEDIA_ROOTS; look for apps'
    # files if we're in DEBUG mode
    if settings.DEBUG:
        try:
            from django.contrib.staticfiles import finders
            absolute_path = finders.find(filename)
            if absolute_path:
                root, _basename = os.path.split(absolute_path)
                return absolute_path, root
        except ImportError:
            pass
    return None, None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def default_combine_filenames_generator(filenames, max_length=40):
    """Return a new filename to use as the combined file name for a bunch
    of files.

    A precondition is that they all have the same file extension.

    Given that the list of files can have different paths, we aim to use
    the most common (i.e. shortest) path.

    Example:
      /somewhere/else/foo.js
      /somewhere/bar.js
      /somewhere/different/too/foobar.js
    The result will be
      /somewhere/foo_bar_foobar.js

    Another thing to note: if the filenames have ``.<10-digit>.``
    timestamps in them, combine them all and use the highest timestamp.

    Raises ValueError if the extensions differ.
    """
    path = None
    names = []
    extension = None
    timestamps = []
    for filename in filenames:
        name = os.path.basename(filename)
        if not extension:
            extension = os.path.splitext(name)[1]
        elif os.path.splitext(name)[1] != extension:
            raise ValueError("Can't combine multiple file extensions")
        # strip embedded 10-digit timestamps, remembering the largest
        # (raw string fixes the invalid '\d' escape warning)
        for each in re.finditer(r'\.\d{10}\.', name):
            timestamps.append(int(each.group().replace('.', '')))
            name = name.replace(each.group(), '.')
        name = os.path.splitext(name)[0]
        names.append(name)
        # keep the shortest directory seen as the common path
        if path is None or len(os.path.dirname(filename)) < len(path):
            path = os.path.dirname(filename)
    new_filename = '_'.join(names)
    if timestamps:
        new_filename += ".%s" % max(timestamps)
    new_filename = new_filename[:max_length]
    new_filename += extension
    return os.path.join(path, new_filename)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def overlaps(self, other):
    """Check for overlap with the other interval."""
    # intervals on different chromosomes never overlap; otherwise they
    # overlap exactly when each starts before the other ends
    return (self.chrom == other.chrom
            and self.start < other.end
            and other.start < self.end)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_upstream_of(self, other):
    """Check if this is upstream of the `other` interval, taking the
    strand of the other interval into account.  Returns None when the
    intervals are on different chromosomes."""
    if self.chrom != other.chrom:
        return None
    plus = getattr(other, "strand", None) == "+"
    # on the - strand (or unstranded), upstream means to the right
    return self.end <= other.start if plus else self.start >= other.end
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gene_features(self):
""" return a list of features for the gene features of this object. This would include exons, introns, utrs, etc. """ |
# each feature is a (chrom, start, end, gene-name, strand, type) tuple
nm, strand = self.gene_name, self.strand
feats = [(self.chrom, self.start, self.end, nm, strand, 'gene')]
for feat in ('introns', 'exons', 'utr5', 'utr3', 'cdss'):
    # singular label for the feature type, e.g. 'introns' -> 'intron'
    fname = feat[:-1] if feat[-1] == 's' else feat
    res = getattr(self, feat)
    # skip entirely-missing annotations (e.g. a (None, None) UTR)
    if res is None or all(r is None for r in res): continue
    # utr5/utr3 return a single (start, end) tuple; wrap for uniformity
    if not isinstance(res, list): res = [res]
    feats.extend((self.chrom, s, e, nm, strand, fname) for s, e in res)
# tss() / promoter() return None for non-gene-prediction tables
tss = self.tss(down=1)
if tss is not None:
    feats.append((self.chrom, tss[0], tss[1], nm, strand, 'tss'))
    prom = self.promoter()
    feats.append((self.chrom, prom[0], prom[1], nm, strand, 'promoter'))
# order all features by their start position
return sorted(feats, key=itemgetter(1))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tss(self, up=0, down=0):
    """Return a (start, end) tuple of positions around the
    transcription-start site.

    Parameters
    ----------
    up : int
        if greater than 0, the strand is used to add this many
        upstream bases in the appropriate direction
    down : int
        if greater than 0, the strand is used to add this many
        downstream bases into the gene

    Returns None for non-gene-prediction rows.
    """
    if not self.is_gene_pred:
        return None
    # the TSS anchor depends on the strand
    anchor = self.txEnd if self.strand == '-' else self.txStart
    if self.strand == '-':
        lo, hi = anchor - down, anchor + up
    else:
        lo, hi = anchor - up, anchor + down
    # clamp at the start of the chromosome
    return max(0, lo), max(hi, lo, 0)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def promoter(self, up=2000, down=0):
    """Return a (start, end) tuple of positions for the promoter region
    of this gene.

    Parameters
    ----------
    up : int
        the distance upstream of the TSS that is considered the promoter
    down : int
        the strand is used to add this many downstream bases into the gene

    Returns None for non-gene-prediction rows.
    """
    if not self.is_gene_pred:
        return None
    # the promoter is simply a window anchored at the TSS
    return self.tss(up=up, down=down)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cds(self):
    """Just the parts of the exons that are translated."""
    exons = self.coding_exons
    if len(exons) < 1:
        return exons
    # clip the terminal coding exons to the coding start/end
    # NOTE(review): this mutates the list returned by coding_exons in
    # place -- assumed to be a fresh list each call; confirm.
    exons[0] = (self.cdsStart, exons[0][1])
    exons[-1] = (exons[-1][0], self.cdsEnd)
    assert all(s < e for s, e in exons)
    return exons
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_downstream_of(self, other):
    """Return a boolean indicating whether this feature is downstream
    of `other`, taking the strand of `other` into account.  Returns
    None when the features are on different chromosomes."""
    if self.chrom != other.chrom:
        return None
    minus = getattr(other, "strand", None) == "-"
    # on the - strand, downstream means entirely to the left of `other`
    return self.end <= other.start if minus else self.start >= other.end
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def utr5(self):
    """Return the 5' UTR as a (start, end) tuple if appropriate,
    otherwise (None, None)."""
    # non-coding or single-exon transcripts have no usable 5' UTR
    if not self.is_coding or len(self.exons) < 2:
        return (None, None)
    if self.strand == "+":
        region = (self.txStart, self.cdsStart)
    else:
        region = (self.cdsEnd, self.txEnd)
    # an empty interval means there is no UTR at all
    if region[0] == region[1]:
        return (None, None)
    return region
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sequence(self, per_exon=False):
    """Return the sequence for this feature.

    If `per_exon` is True, return an array of exon sequences.
    This sequence is never reverse complemented.
    """
    db = self.db
    if per_exon:
        # TODO: use same strategy as cds_sequence to reduce # of requests.
        # DAS coordinates are 1-based inclusive, hence the start + 1
        return [_sequence(db, self.chrom, s + 1, e) for s, e in self.exons]
    return _sequence(db, self.chrom, self.txStart + 1, self.txEnd)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ncbi_blast(self, db="nr", megablast=True, sequence=None):
    """Perform an NCBI blast against the sequence of this feature.

    Parameters
    ----------
    db : str
        NCBI database to search, e.g. "nr"
    megablast : bool
        whether to request the megablast algorithm
    sequence : (None, "cds", "mrna")
        which sequence of this feature to submit; None uses the full
        genomic sequence

    Yields parsed NCBI result records; yields nothing on failure.
    """
    import requests
    # NOTE(review): `requests.defaults` only exists in very old requests
    # releases; newer versions raise AttributeError here -- confirm the
    # pinned dependency before changing.
    requests.defaults.max_retries = 4
    assert sequence in (None, "cds", "mrna")
    seq = self.sequence() if sequence is None else ("".join(self.cds_sequence if sequence == "cds" else self.mrna_sequence))
    r = requests.post('http://blast.ncbi.nlm.nih.gov/Blast.cgi',
                      timeout=20,
                      data=dict(
                          PROGRAM="blastn",
                          #EXPECT=2,
                          DESCRIPTIONS=100,
                          ALIGNMENTS=0,
                          FILTER="L",  # low complexity
                          CMD="Put",
                          # BUGFIX: honor the `megablast` argument
                          # (was hard-coded to True)
                          MEGABLAST=megablast,
                          DATABASE=db,
                          QUERY=">%s\n%s" % (self.name, seq)
                      )
                      )
    if not ("RID =" in r.text and "RTOE" in r.text):
        print("no results", file=sys.stderr)
        # BUGFIX: `raise StopIteration` inside a generator becomes a
        # RuntimeError under PEP 479 (python >= 3.7); a plain return
        # ends the generator cleanly.
        return
    rid = r.text.split("RID = ")[1].split("\n")[0]
    import time
    time.sleep(4)
    print("checking...", file=sys.stderr)
    r = requests.post('http://blast.ncbi.nlm.nih.gov/Blast.cgi',
                      data=dict(RID=rid, format="Text",
                                DESCRIPTIONS=100,
                                DATABASE=db,
                                CMD="Get", ))
    # poll until NCBI reports the search has finished
    while "Status=WAITING" in r.text:
        print("checking...", file=sys.stderr)
        time.sleep(10)
        r = requests.post('http://blast.ncbi.nlm.nih.gov/Blast.cgi',
                          data=dict(RID=rid, format="Text",
                                    CMD="Get", ))
    for rec in _ncbi_parse(r.text):
        yield rec
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def blat(self, db=None, sequence=None, seq_type="DNA"):
    """Make a request to the genome-browser's BLAT interface.

    `sequence` is one of None, "mrna", "cds".
    Returns a list of features that are hits to this sequence.
    """
    from . blat_blast import blat, blat_all
    assert sequence in (None, "cds", "mrna")
    # pick which of this feature's sequences to submit
    if sequence == "cds":
        seq = "".join(self.cds_sequence)
    elif sequence == "mrna":
        seq = "".join(self.mrna_sequence)
    else:
        seq = self.sequence()
    # a list of dbs means "search them all"
    if isinstance(db, (tuple, list)):
        return blat_all(seq, self.gene_name, db, seq_type)
    return blat(seq, self.gene_name, db or self.db, seq_type)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bed(self, *attrs, **kwargs):
    """Return a bed formatted string of this feature.

    Extra positional `attrs` name additional attributes to append as
    columns; the coordinate attributes are always excluded since they
    already form the first three columns.
    """
    if self.is_gene_pred:
        return self.bed12(**kwargs)
    skip = ("chrom", "start", "end", "txStart", "txEnd", "chromStart",
            "chromEnd")
    fields = [self.chrom, self.start, self.end]
    fields.extend(getattr(self, attr) for attr in attrs if attr not in skip)
    return "\t".join(str(f) for f in fields)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dereference_url(url):
    """Make a HEAD request to find the final destination of a URL after
    following any redirects."""
    response = open_url(url, method='HEAD')
    try:
        # .url reflects the final location after redirects
        return response.url
    finally:
        response.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read(url, **kwargs):
    """Read the contents of a URL into memory and return them."""
    response = open_url(url, **kwargs)
    try:
        data = response.read()
    finally:
        # always release the connection, even if read() fails
        response.close()
    return data
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_extracted_paths(namelist, subdir=None):
    """Check whether zip file paths are all relative, and optionally in
    a specified subdirectory; raises an exception if not.

    namelist: A list of paths from the zip file
    subdir: If specified then check whether all paths in the zip file
      are under this subdirectory

    Python docs are unclear about the security of extract/extractall:
    https://docs.python.org/2/library/zipfile.html#zipfile.ZipFile.extractall
    https://docs.python.org/2/library/zipfile.html#zipfile.ZipFile.extract
    """
    def normalised(p):
        # os.path.relpath strips a trailing sep; restore it so prefix
        # checks still see directory boundaries.  Windows paths may
        # also use the unix sep.
        q = os.path.relpath(p)
        if p.endswith(os.path.sep) or p.endswith('/'):
            q += os.path.sep
        return q

    parent = os.path.abspath('.')
    if subdir:
        if os.path.isabs(subdir):
            raise FileException('subdir must be a relative path', subdir)
        subdir = normalised(subdir + os.path.sep)
    for name in namelist:
        # every entry must resolve inside the current directory
        if os.path.commonprefix([parent, os.path.abspath(name)]) != parent:
            raise FileException('Insecure path in zipfile', name)
        if subdir and os.path.commonprefix(
                [subdir, normalised(name)]) != subdir:
            raise FileException(
                'Path in zipfile is not in required subdir', name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_as_local_path(path, overwrite, progress=0, httpuser=None, httppassword=None):
""" Automatically handle local and remote URLs, files and directories path: Either a local directory, file or remote URL. If a URL is given it will be fetched. If this is a zip it will be automatically expanded by default. overwrite: Whether to overwrite an existing file: 'error': Raise an exception 'backup: Renamed the old file and use the new one 'keep': Keep the old file, don't overwrite or raise an exception progress: Number of progress dots, default 0 (don't print) httpuser, httppass: Credentials for HTTP authentication return: A tuple (type, localpath) type: 'file': localpath is the path to a local file 'directory': localpath is the path to a local directory 'unzipped': localpath is the path to a local unzipped directory """ |
# NOTE(review): the docstring also advertises an 'unzipped' return type,
# but this function never returns it -- confirm against callers.
# Anything of the form "<scheme>://" is treated as remote.
m = re.match('([A-Za-z]+)://', path)
if m:
    # url_open handles multiple protocols so don't bother validating
    log.debug('Detected URL protocol: %s', m.group(1))
    # URL should use / as the pathsep
    localpath = path.split('/')[-1]
    if not localpath:
        raise FileException(
            'Remote path appears to be a directory', path)
    if os.path.exists(localpath):
        # an existing local copy: behaviour depends on the overwrite mode
        if overwrite == 'error':
            raise FileException('File already exists', localpath)
        elif overwrite == 'keep':
            # reuse the existing download; do not fetch again
            log.info('Keeping existing %s', localpath)
        elif overwrite == 'backup':
            # move the old file aside, then fetch a fresh copy
            rename_backup(localpath)
            download(path, localpath, progress, httpuser=httpuser,
                     httppassword=httppassword)
        else:
            raise Exception('Invalid overwrite flag: %s' % overwrite)
    else:
        download(path, localpath, progress, httpuser=httpuser,
                 httppassword=httppassword)
else:
    # no protocol prefix: treat as a local path
    localpath = path
log.debug("Local path: %s", localpath)
if os.path.isdir(localpath):
    return 'directory', localpath
if os.path.exists(localpath):
    return 'file', localpath
# Somethings gone very wrong
raise Exception('Local path does not exist: %s' % localpath)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create(fs, channels, application):
    """Allocate and initialize an Opus encoder state.

    Parameters: sampling rate `fs`, number of `channels`, and the Opus
    `application` constant.  Raises OpusError when the library reports
    a failure.
    """
    result_code = ctypes.c_int()
    result = _create(fs, channels, application, ctypes.byref(result_code))
    # BUGFIX: compare the status code by value -- the previous
    # `is not constants.OK` identity check only worked by accident via
    # CPython's small-integer caching.
    if result_code.value != constants.OK:
        raise OpusError(result_code.value)
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def encode(encoder, pcm, frame_size, max_data_bytes):
    """Encode an Opus frame.

    Returns the encoded payload as a byte string.  Raises OpusError on
    a negative result code from the library.
    """
    pcm = ctypes.cast(pcm, c_int16_pointer)
    data = (ctypes.c_char * max_data_bytes)()
    result = _encode(encoder, pcm, frame_size, data, max_data_bytes)
    if result < 0:
        raise OpusError(result)
    # BUGFIX: slicing a ctypes char array already yields the raw bytes;
    # the old array.array('c', ...).tostring() round-trip was redundant
    # and the 'c' typecode does not exist on python 3.
    return data[:result]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def encode_float(encoder, pcm, frame_size, max_data_bytes):
    """Encode an Opus frame from floating point input.

    Returns the encoded payload as a byte string.  Raises OpusError on
    a negative result code from the library.
    """
    pcm = ctypes.cast(pcm, c_float_pointer)
    data = (ctypes.c_char * max_data_bytes)()
    result = _encode_float(encoder, pcm, frame_size, data, max_data_bytes)
    if result < 0:
        raise OpusError(result)
    # BUGFIX: slicing a ctypes char array already yields the raw bytes;
    # the old array.array('c', ...).tostring() round-trip was redundant
    # and the 'c' typecode does not exist on python 3.
    return data[:result]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def __parse_tostr(self, text, **kwargs):
'''Builds and returns the MeCab function for parsing Unicode text.
Args:
fn_name: MeCab function name that determines the function
behavior, either 'mecab_sparse_tostr' or
'mecab_nbest_sparse_tostr'.
Returns:
A function definition, tailored to parsing Unicode text and
returning the result as a string suitable for display on stdout,
using either the default or N-best behavior.
'''
# number of alternative parses to produce
n = self.options.get('nbest', 1)
if self._KW_BOUNDARY in kwargs:
    # boundary-constraint parsing: the regex marks token boundaries
    patt = kwargs.get(self._KW_BOUNDARY, '.')
    tokens = list(self.__split_pattern(text, patt))
    text = ''.join([t[0] for t in tokens])
    btext = self.__str2bytes(text)
    self.__mecab.mecab_lattice_set_sentence(self.lattice, btext)
    # bpos tracks byte positions within the encoded sentence
    bpos = 0
    self.__mecab.mecab_lattice_set_boundary_constraint(
        self.lattice, bpos, self.MECAB_TOKEN_BOUNDARY)
    for (token, match) in tokens:
        bpos += 1
        # bytes inside a matched token must not be split apart
        if match:
            mark = self.MECAB_INSIDE_TOKEN
        else:
            mark = self.MECAB_ANY_BOUNDARY
        for _ in range(1, len(self.__str2bytes(token))):
            self.__mecab.mecab_lattice_set_boundary_constraint(
                self.lattice, bpos, mark)
            bpos += 1
        self.__mecab.mecab_lattice_set_boundary_constraint(
            self.lattice, bpos, self.MECAB_TOKEN_BOUNDARY)
elif self._KW_FEATURE in kwargs:
    # feature-constraint parsing: force features for given morphemes
    features = kwargs.get(self._KW_FEATURE, ())
    fd = {morph: self.__str2bytes(feat) for morph, feat in features}
    tokens = self.__split_features(text, [e[0] for e in features])
    text = ''.join([t[0] for t in tokens])
    btext = self.__str2bytes(text)
    self.__mecab.mecab_lattice_set_sentence(self.lattice, btext)
    bpos = 0
    for chunk, match in tokens:
        c = len(self.__str2bytes(chunk))
        if match == True:
            # constrain the byte range [bpos, bpos+c) to this feature
            self.__mecab.mecab_lattice_set_feature_constraint(
                self.lattice, bpos, bpos+c, fd[chunk])
        bpos += c
else:
    # plain parsing with no constraints
    btext = self.__str2bytes(text)
    self.__mecab.mecab_lattice_set_sentence(self.lattice, btext)
self.__mecab.mecab_parse_lattice(self.tagger, self.lattice)
if n > 1:
    res = self.__mecab.mecab_lattice_nbest_tostr(self.lattice, n)
else:
    res = self.__mecab.mecab_lattice_tostr(self.lattice)
if res != self.__ffi.NULL:
    raw = self.__ffi.string(res)
    return self.__bytes2str(raw).strip()
else:
    # NULL result: surface MeCab's own error message
    err = self.__mecab.mecab_lattice_strerror(self.lattice)
    logger.error(self.__bytes2str(self.__ffi.string(err)))
    raise MeCabError(self.__bytes2str(self.__ffi.string(err)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse(self, text, **kwargs):
    '''Parse the given text and return result from MeCab.

    :param text: the text to parse.
    :type text: str
    :param as_nodes: return generator of MeCabNodes if True;
        or string if False.
    :type as_nodes: bool, defaults to False
    :param boundary_constraints: regular expression for morpheme boundary
        splitting; if non-None and feature_constraints is None, then
        boundary constraint parsing will be used.
    :type boundary_constraints: str or re
    :param feature_constraints: tuple containing tuple instances of
        target morpheme and corresponding feature string in order
        of precedence; if non-None and boundary_constraints is None,
        then feature constraint parsing will be used.
    :type feature_constraints: tuple
    :return: A single string containing the entire MeCab output;
        or a Generator yielding the MeCabNode instances.
    :raises: MeCabError
    '''
    def _fail(message):
        # every validation failure is logged, then raised
        logger.error(message)
        raise MeCabError(message)

    if text is None:
        _fail(self._ERROR_EMPTY_STR)
    elif not isinstance(text, str):
        _fail(self._ERROR_NOTSTR)
    elif 'partial' in self.options and not text.endswith("\n"):
        # partial parsing requires a trailing newline
        _fail(self._ERROR_MISSING_NL)

    if self._KW_BOUNDARY in kwargs:
        val = kwargs[self._KW_BOUNDARY]
        if not isinstance(val, (self._REGEXTYPE, str)):
            _fail(self._ERROR_BOUNDARY)
    elif self._KW_FEATURE in kwargs:
        val = kwargs[self._KW_FEATURE]
        if not isinstance(val, tuple):
            _fail(self._ERROR_FEATURE)

    if kwargs.get(self._KW_ASNODES, False):
        return self.__parse_tonodes(text, **kwargs)
    return self.__parse_tostr(text, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate(tagGroups, terms):
    """Create Tag Groups and Child Tags using data from the terms dict.

    Parameters
    ----------
    tagGroups : iterable
        ids of the parent (group) terms
    terms : dict
        maps a term id to a dict with 'name', 'desc' and (for groups)
        'children' keys

    Returns a JSON string: a list of groups, each with its child tags
    under the 'set' key.
    """
    rv = []
    for pid in tagGroups:
        # In testing we may not have the complete set
        # (idiom fix: membership test directly on the dict, not .keys())
        if pid not in terms:
            continue
        groupData = terms[pid]
        group = dict(name="[%s] %s" % (pid, groupData['name']),
                     desc=groupData['desc'],
                     set=[])
        rv.append(group)
        for cid in groupData['children']:
            cData = terms[cid]
            group['set'].append(dict(name="[%s] %s" % (cid, cData['name']),
                                     desc=cData['desc']))
    return json.dumps(rv, indent=2)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _handle_args(self, cmd, args):
""" We need to support deprecated behaviour for now which makes this quite complicated Current behaviour: - install: Installs a new server, existing server causes an error - install --upgrade: Installs or upgrades a server - install --managedb: Automatically initialise or upgrade the db Deprecated: - install --upgradedb --initdb: Replaced by install --managedb - install --upgradedb: upgrade the db, must exist - install --initdb: initialise the db - upgrade: Upgrades a server, must already exist - upgrade --upgradedb: Automatically upgrade the db returns: - Modified args object, flag to indicate new/existing/auto install """ |
if cmd == 'install':
if args.upgrade:
# Current behaviour: install or upgrade
if args.initdb or args.upgradedb:
raise Stop(10, (
'Deprecated --initdb --upgradedb flags '
'are incompatible with --upgrade'))
newinstall = None
else:
# Current behaviour: Server must not exist
newinstall = True
if args.managedb:
# Current behaviour
if args.initdb or args.upgradedb:
raise Stop(10, (
'Deprecated --initdb --upgradedb flags '
'are incompatible with --managedb'))
args.initdb = True
args.upgradedb = True
else:
if args.initdb or args.upgradedb:
log.warn('--initdb and --upgradedb are deprecated, '
'use --managedb')
elif cmd == 'upgrade':
# Deprecated behaviour
log.warn(
'"omero upgrade" is deprecated, use "omego install --upgrade"')
cmd = 'install'
args.upgrade = True
# Deprecated behaviour: Server must exist
newinstall = False
else:
raise Exception('Unexpected command: %s' % cmd)
return args, newinstall |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_database(self):
""" Handle database initialisation and upgrade, taking into account command line arguments """ |
# TODO: When initdb and upgradedb are dropped we can just test
# managedb, but for backwards compatibility we need to support
# initdb without upgradedb and vice-versa
if self.args.initdb or self.args.upgradedb:
    db = DbAdmin(self.dir, None, self.args, self.external)
    status = db.check()
    log.debug('OMERO database upgrade status: %s', status)
else:
    log.warn('OMERO database check disabled')
    # Checks are skipped entirely, so report DB_INIT_NEEDED without
    # touching the database.
    return DB_INIT_NEEDED
if status == DB_INIT_NEEDED:
    if self.args.initdb:
        log.debug('Initialising OMERO database')
        db.init()
    else:
        # Database is missing and we are not allowed to create it
        log.error('OMERO database not found')
        raise Stop(DB_INIT_NEEDED,
                   'Install/Upgrade failed: OMERO database not found')
elif status == DB_UPGRADE_NEEDED:
    log.warn('OMERO database exists but is out of date')
    if self.args.upgradedb:
        log.debug('Upgrading OMERO database')
        db.upgrade()
    else:
        raise Stop(
            DB_UPGRADE_NEEDED,
            'Pass --managedb or upgrade your OMERO database manually')
else:
    # Only remaining status is an up-to-date database
    assert status == DB_UPTODATE
return status
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self, command):
""" Runs a command as if from the command-line without the need for using popen or subprocess """ |
# Accept either a whitespace-separated command string or a sequence of
# arguments; normalise to a fresh list before handing off to the CLI.
command = (command.split() if isinstance(command, basestring)
           else list(command))
self.external.omero_cli(command)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sort_schemas(schemas):
"""Sort a list of SQL schemas in order""" |
def keyfun(v):
    # Split the schema name with SQL_SCHEMA_REGEXP so numeric fields
    # sort numerically instead of lexicographically.
    x = SQL_SCHEMA_REGEXP.match(v).groups()
    # x3: 'DEV' should come before ''
    return (int(x[0]), x[1], int(x[2]) if x[2] else None,
            x[3] if x[3] else 'zzz', int(x[4]))
return sorted(schemas, key=keyfun)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_schema_files(files):
""" Parse a list of SQL files and return a dictionary of valid schema files where each key is a valid schema file and the corresponding value is a tuple containing the source and the target schema. """ |
f_dict = {}
for f in files:
root, ext = os.path.splitext(f)
if ext != ".sql":
continue
vto, vfrom = os.path.split(root)
vto = os.path.split(vto)[1]
if is_schema(vto) and is_schema(vfrom):
f_dict[f] = (vfrom, vto)
return f_dict |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dump(self):
""" Dump the database using the postgres custom format """ |
# Use the explicit --dumpfile if given, otherwise derive a
# timestamped file name from the configured database name.
target = self.args.dumpfile
if not target:
    db, env = self.get_db_args_env()
    target = fileutils.timestamp_filename(
        'omero-database-%s' % db['name'], 'pgdump')
log.info('Dumping database to %s', target)
if not self.args.dry_run:
    # -Fc: postgres custom (compressed) archive format
    self.pgdump('-Fc', '-f', target)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_db_args_env(self):
""" Get a dictionary of database connection parameters, and create an environment for running postgres commands. Falls back to omego defaults. """ |
# Start from the command-line arguments (omego defaults)
db = {
    'name': self.args.dbname,
    'host': self.args.dbhost,
    'user': self.args.dbuser,
    'pass': self.args.dbpass
}
if not self.args.no_db_config:
    # Prefer values from the server's config.xml when available
    try:
        c = self.external.get_config(force=True)
    except Exception as e:
        log.warn('config.xml not found: %s', e)
        c = {}
    for k in db:
        try:
            db[k] = c['omero.db.%s' % k]
        except KeyError:
            # Keep the command-line/default value for this key
            log.info(
                'Failed to lookup parameter omero.db.%s, using %s',
                k, db[k])
if not db['name']:
    raise Exception('Database name required')
# Pass the password via the environment so it never appears on a
# command line
env = os.environ.copy()
env['PGPASSWORD'] = db['pass']
return db, env
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def psql(self, *psqlargs):
""" Run a psql command """ |
db, env = self.get_db_args_env()
args = [
'-v', 'ON_ERROR_STOP=on',
'-d', db['name'],
'-h', db['host'],
'-U', db['user'],
'-w', '-A', '-t'
] + list(psqlargs)
stdout, stderr = External.run('psql', args, capturestd=True, env=env)
if stderr:
log.warn('stderr: %s', stderr)
log.debug('stdout: %s', stdout)
return stdout |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pgdump(self, *pgdumpargs):
""" Run a pg_dump command """ |
db, env = self.get_db_args_env()
args = ['-d', db['name'], '-h', db['host'], '-U', db['user'], '-w'
] + list(pgdumpargs)
stdout, stderr = External.run(
'pg_dump', args, capturestd=True, env=env)
if stderr:
log.warn('stderr: %s', stderr)
log.debug('stdout: %s', stdout)
return stdout |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_server_dir(self, dir):
""" Set the directory of the server to be controlled """ |
# Normalise to an absolute path; the server counts as configured once
# etc/grid/config.xml exists under it.
self.dir = os.path.abspath(dir)
self.configured = os.path.exists(
    os.path.join(self.dir, 'etc', 'grid', 'config.xml'))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_config(self, force=False):
""" Returns a dictionary of all config.xml properties If `force = True` then ignore any cached state and read config.xml if possible setup_omero_cli() must be called before this method to import the correct omero module to minimise the possibility of version conflicts """ |
# force=True bypasses the cached has_config() state and goes straight
# to the file-existence check below
if not force and not self.has_config():
    raise Exception('No config file')
configxml = os.path.join(self.dir, 'etc', 'grid', 'config.xml')
if not os.path.exists(configxml):
    raise Exception('No config file')
try:
    # Attempt to open config.xml read-only, though this flag is not
    # present in early versions of OMERO 5.0
    c = self._omero.config.ConfigXml(
        configxml, exclusive=False, read_only=True)
except TypeError:
    # Older ConfigXml signature without the read_only keyword
    c = self._omero.config.ConfigXml(configxml, exclusive=False)
try:
    return c.as_map()
finally:
    # Always release the config file handle/lock
    c.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup_omero_cli(self):
""" Imports the omero CLI module so that commands can be run directly. Note Python does not allow a module to be imported multiple times, so this will only work with a single omero instance. This can have several surprising effects, so setup_omero_cli() must be explcitly called. """ |
if not self.dir:
    raise Exception('No server directory set')
if 'omero.cli' in sys.modules:
    # A second import would silently reuse the first server's modules
    raise Exception('omero.cli can only be imported once')
log.debug("Setting up omero CLI")
lib = os.path.join(self.dir, "lib", "python")
if not os.path.exists(lib):
    raise Exception("%s does not exist!" % lib)
# Make the server's bundled Python libraries take import precedence
sys.path.insert(0, lib)
import omero
import omero.cli
log.debug("Using omero CLI from %s", omero.cli.__file__)
self.cli = omero.cli.CLI()
self.cli.loadplugins()
# Keep the imported module for later use (e.g. get_config)
self._omero = omero
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup_previous_omero_env(self, olddir, savevarsfile):
""" Create a copy of the current environment for interacting with the current OMERO server installation """ |
env = self.get_environment(savevarsfile)
def addpath(varname, p):
    # Prepend p to the path-style environment variable varname,
    # failing fast if the directory is missing
    if not os.path.exists(p):
        raise Exception("%s does not exist!" % p)
    current = env.get(varname)
    if current:
        env[varname] = p + os.pathsep + current
    else:
        env[varname] = p
olddir = os.path.abspath(olddir)
lib = os.path.join(olddir, "lib", "python")
addpath("PYTHONPATH", lib)
bin = os.path.join(olddir, "bin")
addpath("PATH", bin)
# Stash the environment for running the previous server's commands
self.old_env = env
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def omero_cli(self, command):
""" Runs a command as if from the OMERO command-line without the need for using popen or subprocess. """ |
# Commands must be pre-tokenised; a raw string would be treated as a
# sequence of single-character arguments by the CLI
assert isinstance(command, list)
if not self.cli:
    raise Exception('omero.cli not initialised')
log.info("Invoking CLI [current environment]: %s", " ".join(command))
# NOTE(review): strict=True presumably raises on failure instead of
# returning a status code -- confirm against omero.cli docs
self.cli.invoke(command, strict=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(exe, args, capturestd=False, env=None):
""" Runs an executable with an array of arguments, optionally in the specified environment. Returns stdout and stderr """ |
command = [exe] + args
if env:
    log.info("Executing [custom environment]: %s", " ".join(command))
else:
    log.info("Executing : %s", " ".join(command))
start = time.time()
# Temp files will be automatically deleted on close()
# If run() throws the garbage collector should call close(), so don't
# bother with try-finally
outfile = None
errfile = None
if capturestd:
    outfile = tempfile.TemporaryFile()
    errfile = tempfile.TemporaryFile()
# Use call instead of Popen so that stdin is connected to the console,
# in case user input is required
# On Windows shell=True is needed otherwise the modified environment
# PATH variable is ignored. On Unix this breaks things.
r = subprocess.call(
    command, env=env, stdout=outfile, stderr=errfile, shell=WINDOWS)
stdout = None
stderr = None
if capturestd:
    # Rewind and slurp the captured streams before closing them
    outfile.seek(0)
    stdout = outfile.read()
    outfile.close()
    errfile.seek(0)
    stderr = errfile.read()
    errfile.close()
end = time.time()
if r != 0:
    log.error("Failed [%.3f s]", end - start)
    raise RunException(
        "Non-zero return code", exe, args, r, stdout, stderr)
log.info("Completed [%.3f s]", end - start)
return stdout, stderr
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def string_support(py3enc):
    '''Create byte-to-string and string-to-byte conversion functions for
    internal use.

    :param py3enc: Encoding used by Python 3 environment.
    :type py3enc: str
    :returns: ``(bytes2str, str2bytes)`` pair of converters.
    '''
    # Compare the interpreter's version tuple instead of the version
    # string: `sys.version < '3'` is a lexicographic string comparison
    # and therefore fragile.
    if sys.version_info[0] < 3:
        # Python 2: str already is bytes, so both directions are no-ops.
        def bytes2str(b):
            '''Identity, returns the argument string (bytes).'''
            return b

        def str2bytes(s):
            '''Identity, returns the argument string (bytes).'''
            return s
    else:
        def bytes2str(b):
            '''Transforms bytes into string (Unicode).'''
            return b.decode(py3enc)

        def str2bytes(u):
            '''Transforms Unicode into string (bytes).'''
            return u.encode(py3enc)
    return (bytes2str, str2bytes)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def splitter_support(py2enc):
    '''Create tokenizer for use in boundary constraint parsing.
    :param py2enc: Encoding used by Python 2 environment.
    :type py2enc: str
    '''
    if sys.version < '3':
        # Python 2: sentences may arrive as encoded byte strings, so
        # decode/re-encode around unicode-aware regex matching.
        def _fn_sentence(pattern, sentence):
            # Decode to unicode only when matching a compiled pattern
            # with the re.UNICODE flag set
            if REGEXTYPE == type(pattern):
                if pattern.flags & re.UNICODE:
                    return sentence.decode(py2enc)
                else:
                    return sentence
            else:
                return sentence
        def _fn_token2str(pattern):
            # Build a post-processor that re-encodes tokens (produced
            # from a unicode-decoded sentence) back to byte strings
            if REGEXTYPE == type(pattern):
                if pattern.flags & re.UNICODE:
                    def _fn(token):
                        return token.encode(py2enc)
                else:
                    def _fn(token):
                        return token
            else:
                def _fn(token):
                    return token
            return _fn
    else:
        # Python 3: text is already str, so both hooks are identities
        def _fn_sentence(pattern, sentence):
            return sentence
        def _fn_token2str(pattern):
            def _fn(token):
                return token
            return _fn
    def _fn_tokenize_pattern(text, pattern):
        # Yield (token, matched) pairs: spans matching `pattern` are
        # flagged True, the stripped text in between is flagged False
        pos = 0
        sentence = _fn_sentence(pattern, text)
        postprocess = _fn_token2str(pattern)
        for m in re.finditer(pattern, sentence):
            if pos < m.start():
                token = postprocess(sentence[pos:m.start()])
                yield (token.strip(), False)
            pos = m.start()
            token = postprocess(sentence[pos:m.end()])
            yield (token.strip(), True)
            pos = m.end()
        if pos < len(sentence):
            token = postprocess(sentence[pos:])
            yield (token.strip(), False)
    def _fn_tokenize_features(text, features):
        acc = []
        acc.append((text.strip(), False))
        for feat in features:
            # NOTE(review): acc is mutated while being enumerated, so
            # tokens inserted for this feature may themselves be
            # re-tested against the same feature -- confirm intended
            for i,e in enumerate(acc):
                if e[1]==False:
                    tmp = list(_fn_tokenize_pattern(e[0], feat))
                    if len(tmp) > 0:
                        acc.pop(i)
                        acc[i:i] = tmp
        return acc
    return _fn_tokenize_pattern, _fn_tokenize_features
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def upsert(self, doc, namespace, timestamp, update_spec=None):
"""Insert a document into Elasticsearch.""" |
index, doc_type = self._index_and_mapping(namespace)
# No need to duplicate '_id' in source document
doc_id = u(doc.pop("_id"))
metadata = {
    'ns': namespace,
    '_ts': timestamp
}
# Index the source document, using lowercase namespace as index name.
action = {
    '_op_type': 'index',
    '_index': index,
    '_type': doc_type,
    '_id': doc_id,
    '_source': self._formatter.format_document(doc)
}
# Index document metadata with original namespace (mixed upper/lower).
meta_action = {
    '_op_type': 'index',
    '_index': self.meta_index_name,
    '_type': self.meta_type,
    '_id': doc_id,
    '_source': bson.json_util.dumps(metadata)
}
self.index(action, meta_action, doc, update_spec)
# Leave _id, since it's part of the original document
# (restores the key popped above so the caller's dict is unchanged)
doc['_id'] = doc_id
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bulk_upsert(self, docs, namespace, timestamp):
"""Insert multiple documents into Elasticsearch.""" |
def docs_to_upsert():
    # Generator yielding one content action plus one metadata action
    # per document; `doc` doubles as an "anything seen?" sentinel.
    doc = None
    for doc in docs:
        # Remove metadata and redundant _id
        index, doc_type = self._index_and_mapping(namespace)
        doc_id = u(doc.pop("_id"))
        document_action = {
            '_index': index,
            '_type': doc_type,
            '_id': doc_id,
            '_source': self._formatter.format_document(doc)
        }
        document_meta = {
            '_index': self.meta_index_name,
            '_type': self.meta_type,
            '_id': doc_id,
            '_source': {
                'ns': namespace,
                '_ts': timestamp
            }
        }
        yield document_action
        yield document_meta
    if doc is None:
        raise errors.EmptyDocsError(
            "Cannot upsert an empty sequence of "
            "documents into Elastic Search")
try:
    # Only pass chunk_size through when explicitly configured
    kw = {}
    if self.chunk_size > 0:
        kw['chunk_size'] = self.chunk_size
    responses = streaming_bulk(client=self.elastic,
                               actions=docs_to_upsert(),
                               **kw)
    for ok, resp in responses:
        if not ok:
            LOG.error(
                "Could not bulk-upsert document "
                "into ElasticSearch: %r" % resp)
    if self.auto_commit_interval == 0:
        self.commit()
except errors.EmptyDocsError:
    # This can happen when mongo-connector starts up, there is no
    # config file, but nothing to dump
    pass
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove(self, document_id, namespace, timestamp):
"""Remove a document from Elasticsearch.""" |
index, doc_type = self._index_and_mapping(namespace)
doc_id = u(document_id)
# Delete the source document from the content index...
action = {
    '_op_type': 'delete',
    '_index': index,
    '_type': doc_type,
    '_id': doc_id,
}
# ...and its bookkeeping entry from the metadata index.
meta_action = {
    '_op_type': 'delete',
    '_index': self.meta_index_name,
    '_type': self.meta_type,
    '_id': doc_id,
}
self.index(action, meta_action)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def send_buffered_operations(self):
"""Send buffered operations to Elasticsearch. This method is periodically called by the AutoCommitThread. """ |
with self.lock:
try:
action_buffer = self.BulkBuffer.get_buffer()
if action_buffer:
successes, errors = bulk(self.elastic, action_buffer)
LOG.debug("Bulk request finished, successfully sent %d "
"operations", successes)
if errors:
LOG.error(
"Bulk request finished with errors: %r", errors)
except es_exceptions.ElasticsearchException:
LOG.exception("Bulk request failed with exception") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_last_doc(self):
"""Get the most recently modified document from Elasticsearch. This method is used to help define a time window within which documents may be in conflict after a MongoDB rollback. """ |
try:
result = self.elastic.search(
index=self.meta_index_name,
body={
"query": {"match_all": {}},
"sort": [{"_ts": "desc"}],
},
size=1
)["hits"]["hits"]
for r in result:
r['_source']['_id'] = r['_id']
return r['_source']
except es_exceptions.RequestError:
# no documents so ES returns 400 because of undefined _ts mapping
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_type_signature(sig):
""" Parse a type signature """ |
match = TYPE_SIG_RE.match(sig.strip())
if not match:
raise RuntimeError('Type signature invalid, got ' + sig)
groups = match.groups()
typ = groups[0]
generic_types = groups[1]
if not generic_types:
generic_types = []
else:
generic_types = split_sig(generic_types[1:-1])
is_array = (groups[2] is not None)
return typ, generic_types, is_array |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_attr_signature(sig):
""" Parse an attribute signature """ |
match = ATTR_SIG_RE.match(sig.strip())
if not match:
raise RuntimeError('Attribute signature invalid, got ' + sig)
name, _, params = match.groups()
if params is not None and params.strip() != '':
params = split_sig(params)
params = [parse_param_signature(x) for x in params]
else:
params = []
return (name, params) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_msdn_ref(name):
""" Try and create a reference to a type on MSDN """ |
in_msdn = False
if name in MSDN_VALUE_TYPES:
name = MSDN_VALUE_TYPES[name]
in_msdn = True
if name.startswith('System.'):
in_msdn = True
if in_msdn:
link = name.split('<')[0]
if link in MSDN_LINK_MAP:
link = MSDN_LINK_MAP[link]
else:
link = link.lower()
url = 'https://msdn.microsoft.com/en-us/library/'+link+'.aspx'
node = nodes.reference(name, shorten_type(name))
node['refuri'] = url
node['reftitle'] = name
return node
else:
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def shorten_type(typ):
""" Shorten a type. E.g. drops 'System.' """ |
offset = 0
for prefix in SHORTEN_TYPE_PREFIXES:
if typ.startswith(prefix):
if len(prefix) > offset:
offset = len(prefix)
return typ[offset:] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.