_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q40800 | Socket.connect | train | def connect(self):
"""Connect to the given socket"""
if not self.connected:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect(self.address)
self.connected = True | python | {
"resource": ""
} |
q40801 | Socket.send | train | def send(self, data):
"""Send a formatted message to the ADB server"""
self._send_data(int_to_hex(len(data)))
self._send_data(data) | python | {
"resource": ""
} |
q40802 | Socket._send_data | train | def _send_data(self, data):
"""Send data to the ADB server"""
total_sent = 0
while total_sent < len(data):
# Send only the bytes that haven't been
# sent yet
sent = self.socket.send(data[total_sent:].encode("ascii"))
if sent == 0:
self.close()
raise RuntimeError("Socket connection dropped, "
"send failed")
total_sent += sent | python | {
"resource": ""
} |
q40803 | Socket.receive_until_end | train | def receive_until_end(self, timeout=None):
"""
Reads and blocks until the socket closes
Used for the "shell" command, where STDOUT and STDERR
are just redirected to the terminal with no length
"""
if self.receive_fixed_length(4) != "OKAY":
raise SocketError("Socket communication failed: "
"the server did not return a valid response")
# The time at which the receive starts
start_time = time.clock()
output = ""
while True:
if timeout is not None:
self.socket.settimeout(timeout - (time.clock() - start_time))
chunk = ''
try:
chunk = self.socket.recv(4096).decode("ascii")
except socket.timeout:
return output
if not chunk:
return output
output += chunk | python | {
"resource": ""
} |
q40804 | defaults | train | def defaults(f, self, *args, **kwargs):
"""
For ``PARAMETERS`` keys, replace None ``kwargs`` with ``self`` attr values.
Should be applied on the top of any decorator stack so other decorators see
the "right" kwargs.
Will also apply transformations found in ``TRANSFORMS``.
"""
for name, data in PARAMETERS.iteritems():
kwargs[name] = kwargs.get(name) or getattr(self, name)
if 'transform' in data:
kwargs[name] = data['transform'](kwargs[name])
return f(self, *args, **kwargs) | python | {
"resource": ""
} |
q40805 | requires | train | def requires(*params):
"""
Raise ValueError if any ``params`` are omitted from the decorated kwargs.
None values are considered omissions.
Example usage on an AWS() method:
@requires('zone', 'security_groups')
def my_aws_method(self, custom_args, **kwargs):
# We'll only get here if 'kwargs' contained non-None values for
# both 'zone' and 'security_groups'.
"""
def requires(f, self, *args, **kwargs):
missing = filter(lambda x: kwargs.get(x) is None, params)
if missing:
msgs = ", ".join([PARAMETERS[x]['msg'] for x in missing])
raise ValueError("Missing the following parameters: %s" % msgs)
return f(self, *args, **kwargs)
return decorator(requires) | python | {
"resource": ""
} |
q40806 | AWS.get_security_group_id | train | def get_security_group_id(self, name):
"""
Take name string, give back security group ID.
To get around VPC's API being stupid.
"""
# Memoize entire list of groups
if not hasattr(self, '_security_groups'):
self._security_groups = {}
for group in self.get_all_security_groups():
self._security_groups[group.name] = group.id
return self._security_groups[name] | python | {
"resource": ""
} |
q40807 | AWS.get_instance_subnet_name | train | def get_instance_subnet_name(self, instance):
"""
Return a human readable name for given instance's subnet, or None.
Uses stored config mapping of subnet IDs to names.
"""
# TODO: we have to do this here since we are monkeypatching Instance.
# If we switch to custom Instance (sub)class then we could do it in the
# object, provided it has access to the configuration data.
if instance.subnet_id:
# Account for omitted 'subnet-'
subnet = self.config['subnets'][instance.subnet_id[7:]]
else:
subnet = BLANK
return subnet | python | {
"resource": ""
} |
q40808 | AWS.get_subnet_id | train | def get_subnet_id(self, name):
"""
Return subnet ID for given ``name``, if it exists.
E.g. with a subnet mapping of ``{'abc123': 'ops', '67fd56': 'prod'}``,
``get_subnet_id('ops')`` would return ``'abc123'``. If the map has
non-unique values, the first matching key will be returned.
If no match is found, the given ``name`` is returned as-is. This works
well for e.g. normalizing names-or-IDs to just IDs.
"""
for subnet_id, subnet_name in self.config['subnets'].iteritems():
if subnet_name == name:
return subnet_id
return name | python | {
"resource": ""
} |
q40809 | AWS.create | train | def create(self, hostname, **kwargs):
"""
Create new EC2 instance named ``hostname``.
You may specify keyword arguments matching those of ``__init__`` (e.g.
``size``, ``ami``) to override any defaults given when the object was
created, or to fill in parameters not given at initialization time.
Additional parameters that are instance-specific:
* ``ip``: The static private IP address for the new host.
This method returns a ``boto.EC2.instance.Instance`` object.
"""
# Create
creating = "Creating '%s' (a %s instance of %s)" % (
hostname, kwargs['size'], kwargs['ami']
)
with self.msg(creating):
instance = self._create(hostname, kwargs)
# Name
with self.msg("Tagging as '%s'" % hostname):
try:
instance.rename(hostname)
# One-time retry for API errors when setting tags
except _ResponseError:
time.sleep(1)
instance.rename(hostname)
# Wait for it to finish booting
with self.msg("Waiting for boot: "):
tick = 5
while instance.state != 'running':
self.log(".", end='')
time.sleep(tick)
instance.update()
return instance | python | {
"resource": ""
} |
q40810 | AWS.get | train | def get(self, arg):
"""
Return instance object with given EC2 ID or nametag.
"""
try:
reservations = self.get_all_instances(filters={'tag:Name': [arg]})
instance = reservations[0].instances[0]
except IndexError:
try:
instance = self.get_all_instances([arg])[0].instances[0]
except (_ResponseError, IndexError):
# TODO: encapsulate actual exception for debugging
err = "Can't find any instance with name or ID '%s'" % arg
raise ValueError(err)
return instance | python | {
"resource": ""
} |
q40811 | AWS.get_volumes_for_instance | train | def get_volumes_for_instance(self, arg, device=None):
"""
Return all EC2 Volume objects attached to ``arg`` instance name or ID.
May specify ``device`` to limit to the (single) volume attached as that
device.
"""
instance = self.get(arg)
filters = {'attachment.instance-id': instance.id}
if device is not None:
filters['attachment.device'] = device
return self.get_all_volumes(filters=filters) | python | {
"resource": ""
} |
q40812 | AWS.terminate | train | def terminate(self, arg):
"""
Terminate instance with given EC2 ID or nametag.
"""
instance = self.get(arg)
with self.msg("Terminating %s (%s): " % (instance.name, instance.id)):
instance.rename("old-%s" % instance.name)
instance.terminate()
while instance.state != 'terminated':
time.sleep(5)
self.log(".", end='')
instance.update() | python | {
"resource": ""
} |
q40813 | ZNodeMap._set | train | def _set(self, data, version):
"""serialize and set data to self.path."""
self.zk.set(self.path, json.dumps(data), version) | python | {
"resource": ""
} |
q40814 | Jones.get_config | train | def get_config(self, hostname):
"""
Returns a configuration for hostname.
"""
version, config = self._get(
self.associations.get(hostname)
)
return config | python | {
"resource": ""
} |
q40815 | Jones.get_view_by_env | train | def get_view_by_env(self, env):
"""
Returns the view of `env`.
"""
version, data = self._get(self._get_view_path(env))
return data | python | {
"resource": ""
} |
q40816 | Jones.assoc_host | train | def assoc_host(self, hostname, env):
"""
Associate a host with an environment.
hostname is opaque to Jones.
Any string which uniquely identifies a host is acceptable.
"""
dest = self._get_view_path(env)
self.associations.set(hostname, dest) | python | {
"resource": ""
} |
q40817 | Jones.get_associations | train | def get_associations(self, env):
"""
Get all the associations for this env.
Root cannot have associations, so return None for root.
returns a map of hostnames to environments.
"""
if env.is_root:
return None
associations = self.associations.get_all()
return [assoc for assoc in associations
if associations[assoc] == self._get_view_path(env)] | python | {
"resource": ""
} |
q40818 | Jones._flatten_from_root | train | def _flatten_from_root(self, env):
"""
Flatten values from root down in to new view.
"""
nodes = env.components
# Path through the znode graph from root ('') to env
path = [nodes[:n] for n in xrange(len(nodes) + 1)]
# Expand path and map it to the root
path = map(
self._get_env_path,
[Env('/'.join(p)) for p in path]
)
data = {}
for n in path:
_, config = self._get(n)
data.update(config)
return data | python | {
"resource": ""
} |
q40819 | Serializer.encode | train | def encode(self, value):
"""Encode value."""
value = self.serialize(value)
if self.encoding:
value = value.encode(self.encoding)
return value | python | {
"resource": ""
} |
q40820 | Serializer.decode | train | def decode(self, value):
"""Decode value."""
if self.encoding:
value = value.decode(self.encoding)
return self.deserialize(value) | python | {
"resource": ""
} |
q40821 | install | train | def install(packagename, save, save_dev, save_test, filename):
"""
Install the package via pip, pin the package only to requirements file.
Use option to decide which file the package will be pinned to.
"""
print('Installing ', packagename)
print(sh_pip.install(packagename))
if not filename:
filename = get_filename(save, save_dev, save_test)
try:
add_requirements(packagename, filename)
except AssertionError:
print('Package already pinned in ', filename) | python | {
"resource": ""
} |
q40822 | remove | train | def remove(packagename, save, save_dev, save_test, filename):
"""
Uninstall the package and remove it from requirements file.
"""
print(sh_pip.uninstall(packagename, "-y"))
if not filename:
filename = get_filename(save, save_dev, save_test)
remove_requirements(packagename, filename) | python | {
"resource": ""
} |
q40823 | DKCloudCommandRunner.is_subdirectory | train | def is_subdirectory(potential_subdirectory, expected_parent_directory):
"""
Is the first argument a sub-directory of the second argument?
:param potential_subdirectory:
:param expected_parent_directory:
:return: True if the potential_subdirectory is a child of the expected parent directory
"""
def _get_normalized_parts(path):
return DKCloudCommandRunner.os_path_split_asunder(os.path.realpath(os.path.abspath(os.path.normpath(path))))
# make absolute and handle symbolic links, split into components
sub_parts = _get_normalized_parts(potential_subdirectory)
parent_parts = _get_normalized_parts(expected_parent_directory)
if len(parent_parts) > len(sub_parts):
# a parent directory never has more path segments than its child
return False
# we expect the zip to end with the short path, which we know to be the parent
return all(part1 == part2 for part1, part2 in zip(sub_parts, parent_parts)) | python | {
"resource": ""
} |
q40824 | DKCloudCommandRunner._split_one_end | train | def _split_one_end(path):
"""
Utility function for splitting off the very end part of a path.
"""
s = path.rsplit('/', 1)
if len(s) == 1:
return s[0], ''
else:
return tuple(s) | python | {
"resource": ""
} |
q40825 | get_system_config_directory | train | def get_system_config_directory():
"""
Return platform specific config directory.
"""
if platform.system().lower() == 'windows':
_cfg_directory = Path(os.getenv('APPDATA') or '~')
elif platform.system().lower() == 'darwin':
_cfg_directory = Path('~', 'Library', 'Preferences')
else:
_cfg_directory = Path(os.getenv('XDG_CONFIG_HOME') or '~/.config')
logger.debug('Fetching configt directory for {}.'
.format(platform.system()))
return _cfg_directory.joinpath(Path('mayalauncher/.config')) | python | {
"resource": ""
} |
q40826 | get_version_exec_mapping_from_path | train | def get_version_exec_mapping_from_path(path):
"""
Find valid application version from given path object and return
a mapping of version, executable.
"""
version_executable = {}
logger.debug('Getting exes from path: {}'.format(path))
for sub_dir in path.iterdir():
if not sub_dir.name.startswith(APPLICATION_NAME):
continue
release = sub_dir.name.split(APPLICATION_NAME)[-1]
executable = Path(sub_dir, 'bin').glob('maya.exe').next()
version_executable[release] = str(executable)
logger.debug('Found exes for: {}'.format(version_executable.keys()))
return version_executable | python | {
"resource": ""
} |
q40827 | find_applications_on_system | train | def find_applications_on_system():
"""
Collect maya version from Autodesk PATH if exists, else try looking
for custom executable paths from config file.
"""
# First we collect maya versions from the Autodesk folder we presume
# is addeed to the system environment "PATH"
path_env = os.getenv('PATH').split(os.pathsep)
versions = {}
for each in path_env:
path = Path(os.path.expandvars(each))
if not path.exists():
continue
if path.name.endswith(DEVELOPER_NAME):
if not path.exists():
continue
versions.update(get_version_exec_mapping_from_path(path))
return versions | python | {
"resource": ""
} |
q40828 | build_config | train | def build_config(config_file=get_system_config_directory()):
"""
Construct the config object from necessary elements.
"""
config = Config(config_file, allow_no_value=True)
application_versions = find_applications_on_system()
# Add found versions to config if they don't exist. Versions found
# in the config file takes precedence over versions found in PATH.
for item in application_versions.iteritems():
if not config.has_option(Config.EXECUTABLES, item[0]):
config.set(Config.EXECUTABLES, item[0], item[1])
return config | python | {
"resource": ""
} |
q40829 | get_environment_paths | train | def get_environment_paths(config, env):
"""
Get environment paths from given environment variable.
"""
if env is None:
return config.get(Config.DEFAULTS, 'environment')
# Config option takes precedence over environment key.
if config.has_option(Config.ENVIRONMENTS, env):
env = config.get(Config.ENVIRONMENTS, env).replace(' ', '').split(';')
else:
env = os.getenv(env)
if env:
env = env.split(os.pathsep)
return [i for i in env if i] | python | {
"resource": ""
} |
q40830 | build_maya_environment | train | def build_maya_environment(config, env=None, arg_paths=None):
"""
Construct maya environment.
"""
maya_env = MayaEnvironment()
maya_env.exclude_pattern = config.get_list(Config.PATTERNS, 'exclude')
maya_env.icon_extensions = config.get_list(Config.PATTERNS, 'icon_ext')
env = get_environment_paths(config, env)
if not env and arg_paths is None:
return logger.info('Using maya factory environment setup.')
logger.debug('Launching with addon paths: {}'.format(arg_paths))
logger.debug('Launching with environment paths: {}'.format(env))
if arg_paths:
arg_paths = arg_paths.split(' ')
for directory in flatten_combine_lists(env, arg_paths or ''):
maya_env.traverse_path_for_valid_application_paths(directory)
return maya_env | python | {
"resource": ""
} |
q40831 | launch | train | def launch(exec_, args):
"""
Launches application.
"""
if not exec_:
raise RuntimeError(
'Mayalauncher could not find a maya executable, please specify'
'a path in the config file (-e) or add the {} directory location'
'to your PATH system environment.'.format(DEVELOPER_NAME)
)
# Launch Maya
if args.debug:
return
watched = WatchFile()
cmd = [exec_] if args.file is None else [exec_, args.file]
cmd.extend(['-hideConsole', '-log', watched.path])
if args.debug:
cmd.append('-noAutoloadPlugins')
maya = subprocess.Popen(cmd)
# Maya 2016 stupid clic ipm
# os.environ['MAYA_DISABLE_CLIC_IPM'] = '1'
# os.environ['MAYA_DISABLE_CIP'] = '1'
# os.environ['MAYA_OPENCL_IGNORE_DRIVER_VERSION'] = '1'
while True:
time.sleep(1)
maya.poll()
watched.check()
if maya.returncode is not None:
if not maya.returncode == 0:
maya = subprocess.Popen(cmd)
else:
watched.stop()
break | python | {
"resource": ""
} |
q40832 | Config._create_default_config_file | train | def _create_default_config_file(self):
"""
If config file does not exists create and set default values.
"""
logger.info('Initialize Maya launcher, creating config file...\n')
self.add_section(self.DEFAULTS)
self.add_section(self.PATTERNS)
self.add_section(self.ENVIRONMENTS)
self.add_section(self.EXECUTABLES)
self.set(self.DEFAULTS, 'executable', None)
self.set(self.DEFAULTS, 'environment', None)
self.set(self.PATTERNS, 'exclude', ', '.join(self.EXLUDE_PATTERNS))
self.set(self.PATTERNS, 'icon_ext', ', '.join(self.ICON_EXTENSIONS))
self.config_file.parent.mkdir(exist_ok=True)
self.config_file.touch()
with self.config_file.open('wb') as f:
self.write(f)
# If this function is run inform the user that a new file has been
# created.
sys.exit('Maya launcher has successfully created config file at:\n'
' "{}"'.format(str(self.config_file))) | python | {
"resource": ""
} |
q40833 | Config.get_list | train | def get_list(self, section, option):
"""
Convert string value to list object.
"""
if self.has_option(section, option):
return self.get(section, option).replace(' ', '').split(',')
else:
raise KeyError('{} with {} does not exist.'.format(section,
option)) | python | {
"resource": ""
} |
q40834 | Config.edit | train | def edit(self):
"""
Edit file with default os application.
"""
if platform.system().lower() == 'windows':
os.startfile(str(self.config_file))
else:
if platform.system().lower() == 'darwin':
call = 'open'
else:
call = 'xdg-open'
subprocess.call([call, self.config_file]) | python | {
"resource": ""
} |
q40835 | MayaEnvironment.is_excluded | train | def is_excluded(self, path, exclude=None):
"""
Return if path is in exclude pattern.
"""
for pattern in (exclude or self.exclude_pattern):
if path.match(pattern):
return True
else:
return False | python | {
"resource": ""
} |
q40836 | MayaEnvironment.put_path | train | def put_path(self, path):
"""
Given path identify in which environment the path belong to and
append it.
"""
if self.is_package(path):
logger.debug('PYTHON PACKAGE: {}'.format(path))
self.python_paths.append(path.parent)
site.addsitedir(str(path.parent))
xbmdirs = self.get_directories_with_extensions(
path,
self.icon_extensions,
)
self.xbmlang_paths.extend(xbmdirs)
return
if self.has_next(path.glob('*.' + self.MEL)):
logger.debug('MEL: {}'.format(str(path)))
self.script_paths.append(path)
if self.has_next(path.glob('*.' + self.PYTHON)):
logger.debug('PYTHONPATH: {}'.format(str(path)))
self.python_paths.append(path)
site.addsitedir(str(path))
if self.PLUGIN in list(path.iterdir()):
logger.debug('PLUG-IN: {}'.format(str(path)))
self.plug_in_paths.append(path)
for ext in self.icon_extensions:
if self.has_next(path.glob('*.' + ext)):
logger.debug('XBM: {}.'.format(str(path)))
self.xbmlang_paths.append(path)
break | python | {
"resource": ""
} |
q40837 | MayaEnvironment.traverse_path_for_valid_application_paths | train | def traverse_path_for_valid_application_paths(self, top_path):
"""
For every path beneath top path that does not contain the exclude
pattern look for python, mel and images and place them in their
corresponding system environments.
"""
self.put_path(Path(top_path))
for p in self._walk(top_path):
self.put_path(p) | python | {
"resource": ""
} |
q40838 | ApiClient.get_dataset | train | def get_dataset(self, datasetid):
"""The method is getting information about dataset byt it's id"""
path = '/api/1.0/meta/dataset/{}'
return self._api_get(definition.Dataset, path.format(datasetid)) | python | {
"resource": ""
} |
q40839 | ApiClient.get_dimension | train | def get_dimension(self, dataset, dimension):
"""The method is getting information about dimension with items"""
path = '/api/1.0/meta/dataset/{}/dimension/{}'
return self._api_get(definition.Dimension, path.format(dataset, dimension)) | python | {
"resource": ""
} |
q40840 | ApiClient.get_daterange | train | def get_daterange(self, dataset):
"""The method is getting information about date range of dataset"""
path = '/api/1.0/meta/dataset/{}/daterange'
return self._api_get(definition.DateRange, path.format(dataset)) | python | {
"resource": ""
} |
q40841 | ApiClient.get_data | train | def get_data(self, pivotrequest):
"""The method is getting data by pivot request"""
path = '/api/1.0/data/pivot/'
return self._api_post(definition.PivotResponse, path, pivotrequest) | python | {
"resource": ""
} |
q40842 | ApiClient.get_data_raw | train | def get_data_raw(self, request):
"""The method is getting data by raw request"""
path = '/api/1.0/data/raw/'
res = self._api_post(definition.RawDataResponse, path, request)
token = res.continuation_token
while token is not None:
res2 = self.get_data_raw_with_token(token)
res.series += res2.series
token = res2.continuation_token
return res | python | {
"resource": ""
} |
q40843 | ApiClient.get_mnemonics | train | def get_mnemonics (self, mnemonics):
"""The method get series by mnemonics"""
path = '/api/1.0/data/mnemonics?mnemonics={0}'
return self._api_get(definition.MnemonicsResponseList, path.format(mnemonics)) | python | {
"resource": ""
} |
q40844 | ApiClient.upload_file | train | def upload_file(self, file):
"""The method is posting file to the remote server"""
url = self._get_url('/api/1.0/upload/post')
fcontent = FileContent(file)
binary_data = fcontent.get_binary()
headers = self._get_request_headers()
req = urllib.request.Request(url, binary_data, headers)
req.add_header('Content-type', fcontent.get_content_type())
req.add_header('Content-length', len(binary_data))
resp = urllib.request.urlopen(req)
return definition.UploadPostResponse(_response_to_json(resp)) | python | {
"resource": ""
} |
q40845 | ApiClient.upload_verify | train | def upload_verify(self, file_location, dataset=None):
"""This method is verifiing posted file on server"""
path = '/api/1.0/upload/verify'
query = 'doNotGenerateAdvanceReport=true&filePath={}'.format(file_location)
if dataset:
query = 'doNotGenerateAdvanceReport=true&filePath={}&datasetId={}'.format(file_location, dataset)
return self._api_get(definition.UploadVerifyResponse, path, query) | python | {
"resource": ""
} |
q40846 | ApiClient.upload_submit | train | def upload_submit(self, upload_request):
"""The method is submitting dataset upload"""
path = '/api/1.0/upload/save'
return self._api_post(definition.DatasetUploadResponse, path, upload_request) | python | {
"resource": ""
} |
q40847 | ApiClient.upload_status | train | def upload_status(self, upload_id):
"""The method is checking status of uploaded dataset"""
path = '/api/1.0/upload/status'
query = 'id={}'.format(upload_id)
return self._api_get(definition.DatasetUploadStatusResponse, path, query) | python | {
"resource": ""
} |
q40848 | ApiClient.delete | train | def delete(self, dataset):
"""The method is deleting dataset by it's id"""
url = self._get_url('/api/1.0/meta/dataset/{}/delete'.format(dataset))
json_data = ''
binary_data = json_data.encode()
headers = self._get_request_headers()
req = urllib.request.Request(url, binary_data, headers)
resp = urllib.request.urlopen(req)
str_response = resp.read().decode('utf-8')
if str_response != '"successful"' or resp.status < 200 or resp.status >= 300:
msg = 'Dataset has not been deleted, because of the following error(s): {}'.format(str_response)
raise ValueError(msg) | python | {
"resource": ""
} |
q40849 | ApiClient.verify | train | def verify(self, dataset, publication_date, source, refernce_url):
"""The method is verifying dataset by it's id"""
path = '/api/1.0/meta/verifydataset'
req = definition.DatasetVerifyRequest(dataset, publication_date, source, refernce_url)
result = self._api_post(definition.DatasetVerifyResponse, path, req)
if result.status == 'failed':
ver_err = '\r\n'.join(result.errors)
msg = 'Dataset has not been verified, because of the following error(s): {}'.format(ver_err)
raise ValueError(msg) | python | {
"resource": ""
} |
q40850 | FileContent.get_binary | train | def get_binary(self):
"""Return a binary buffer containing the file content"""
content_disp = 'Content-Disposition: form-data; name="file"; filename="{}"'
stream = io.BytesIO()
stream.write(_string_to_binary('--{}'.format(self.boundary)))
stream.write(_crlf())
stream.write(_string_to_binary(content_disp.format(self.file_name)))
stream.write(_crlf())
stream.write(_crlf())
stream.write(self.body)
stream.write(_crlf())
stream.write(_string_to_binary('--{}--'.format(self.boundary)))
stream.write(_crlf())
return stream.getvalue() | python | {
"resource": ""
} |
q40851 | QueryCache._scratch_stream_name | train | def _scratch_stream_name(self):
"""
A unique cache stream name for this QueryCache.
Hashes the necessary facts about this QueryCache to generate a
unique cache stream name. Different `query_function`
implementations at different `bucket_width` values will be cached
to different streams.
TODO(marcua): This approach won't work for dynamically-generated
functions. We will want to either:
1) Hash the function closure/containing scope.
2) Ditch this approach and rely on the caller to tell us all the
information that makes this function unique.
"""
query_details = [
str(QueryCache.QUERY_CACHE_VERSION),
str(self._bucket_width),
binascii.b2a_hex(marshal.dumps(self._query_function.func_code)),
str(self._query_function_args),
str(self._query_function_kwargs),
]
return hashlib.sha512('$'.join(query_details)).hexdigest()[:20] | python | {
"resource": ""
} |
q40852 | QueryCache._bucket_time | train | def _bucket_time(self, event_time):
"""
The seconds since epoch that represent a computed bucket.
An event bucket is the time of the earliest possible event for
that `bucket_width`. Example: if `bucket_width =
timedelta(minutes=10)`, bucket times will be the number of seconds
since epoch at 12:00, 12:10, ... on each day.
"""
event_time = kronos_time_to_epoch_time(event_time)
return event_time - (event_time % self._bucket_width) | python | {
"resource": ""
} |
q40853 | QueryCache._bucket_events | train | def _bucket_events(self, event_iterable):
"""
Convert an iterable of events into an iterable of lists of events
per bucket.
"""
current_bucket_time = None
current_bucket_events = None
for event in event_iterable:
event_bucket_time = self._bucket_time(event[TIMESTAMP_FIELD])
if current_bucket_time is None or current_bucket_time < event_bucket_time:
if current_bucket_events is not None:
yield current_bucket_events
current_bucket_time = event_bucket_time
current_bucket_events = []
current_bucket_events.append(event)
if current_bucket_events is not None and current_bucket_events != []:
yield current_bucket_events | python | {
"resource": ""
} |
q40854 | QueryCache._cached_results | train | def _cached_results(self, start_time, end_time):
"""
Retrieves cached results for any bucket that has a single cache entry.
If a bucket has two cache entries, there is a chance that two
different writers previously computed and cached a result since
Kronos has no transaction semantics. While it might be safe to
return one of the cached results if there are multiple, we
currently do the safe thing and pretend we have no previously
computed data for this bucket.
"""
cached_buckets = self._bucket_events(
self._client.get(self._scratch_stream, start_time, end_time,
namespace=self._scratch_namespace))
for bucket_events in cached_buckets:
# If we have multiple cache entries for the same bucket, pretend
# we have no results for that bucket.
if len(bucket_events) == 1:
first_result = bucket_events[0]
yield (kronos_time_to_epoch_time(first_result[TIMESTAMP_FIELD]),
first_result[QueryCache.CACHE_KEY]) | python | {
"resource": ""
} |
q40855 | QueryCache.compute_and_cache_missing_buckets | train | def compute_and_cache_missing_buckets(self, start_time, end_time,
untrusted_time, force_recompute=False):
"""
Return the results for `query_function` on every `bucket_width`
time period between `start_time` and `end_time`. Look for
previously cached results to avoid recomputation. For any buckets
where all events would have occurred before `untrusted_time`,
cache the results.
:param start_time: A datetime for the beginning of the range,
aligned with `bucket_width`.
:param end_time: A datetime for the end of the range, aligned with
`bucket_width`.
:param untrusted_time: A datetime after which to not trust that
computed data is stable. Any buckets that overlap with or follow
this untrusted_time will not be cached.
:param force_recompute: A boolean that, if True, will force
recompute and recaching of even previously cached data.
"""
if untrusted_time and not untrusted_time.tzinfo:
untrusted_time = untrusted_time.replace(tzinfo=tzutc())
events = self._compute_buckets(start_time, end_time, compute_missing=True,
cache=True, untrusted_time=untrusted_time,
force_recompute=force_recompute)
for event in events:
yield event | python | {
"resource": ""
} |
q40856 | QueryCache.retrieve_interval | train | def retrieve_interval(self, start_time, end_time, compute_missing=False):
"""
Return the results for `query_function` on every `bucket_width`
time period between `start_time` and `end_time`. Look for
previously cached results to avoid recomputation.
:param start_time: A datetime for the beginning of the range,
aligned with `bucket_width`.
:param end_time: A datetime for the end of the range, aligned with
`bucket_width`.
:param compute_missing: A boolean that, if True, will compute any
non-cached results.
"""
events = self._compute_buckets(start_time, end_time,
compute_missing=compute_missing)
for event in events:
yield event | python | {
"resource": ""
} |
q40857 | Scheduler._loop | train | def _loop(self, reader):
"""Main execution loop of the scheduler.
The loop runs every second. Between iterations, the loop listens for
schedule or cancel requests coming from Flask via over the gipc pipe
(reader) and modifies the queue accordingly.
When a task completes, it is rescheduled
"""
results = set()
while True:
now = datetime.datetime.now()
if self._task_queue and self._task_queue[0][0] <= now:
task = heappop(self._task_queue)[1]
if task['id'] not in self._pending_cancels:
result = self._executor.submit(_execute, task)
results.add(result)
else:
self._pending_cancels.remove(task['id'])
else:
# Check for new tasks coming from HTTP
with gevent.Timeout(0.5, False) as t:
message = reader.get(timeout=t)
if message[0] == 'schedule':
self._schedule(message[1], next_run=now)
elif message[0] == 'cancel':
self._cancel(message[1])
# Reschedule completed tasks
if not results:
gevent.sleep(0.5)
continue
ready = self._executor.wait(results, num=1, timeout=0.5)
for result in ready:
results.remove(result)
if result.value:
task = result.value
interval = int(task['interval'])
if interval:
run_at = now + datetime.timedelta(seconds=int(task['interval']))
self._schedule(task, next_run=run_at)
else:
err_msg = result.exception
sys.stderr.write("ERROR: %s" % err_msg)
email_msg = 'Task %s failed at %s\n\n%s' % (
task['id'],
datetime.datetime.now(),
err_msg
)
send_mail(get_app().config['SCHEDULER_FAILURE_EMAILS'],
'Scheduler Failure',
email_msg) | python | {
"resource": ""
} |
q40858 | token_protected_endpoint | train | def token_protected_endpoint(function):
"""Requires valid auth_token in POST to access
An auth_token is built by sending a dictionary built from a
Werkzeug.Request.form to the scheduler.auth.create_token function.
"""
@wraps(function)
def decorated(*args, **kwargs):
auth_token = request.form.get('auth_token')
if not auth_token:
return json.dumps({
'status': 'fail',
'reason': 'You must provide an auth_token',
})
data = dict(request.form)
del data['auth_token']
correct_token = create_token(current_app.config['SECRET_KEY'], data)
if _compare_digest(auth_token, correct_token):
return function(*args, **kwargs)
else:
return json.dumps({
'status': 'fail',
'reason': 'Incorrect auth_token',
})
return decorated | python | {
"resource": ""
} |
q40859 | molmz | train | def molmz(df, noise=10000):
"""
The mz of the molecular ion.
"""
d = ((df.values > noise) * df.columns).max(axis=1)
return Trace(d, df.index, name='molmz') | python | {
"resource": ""
} |
q40860 | mzminus | train | def mzminus(df, minus=0, noise=10000):
"""
The abundances of ions which are minus below the molecular ion.
"""
mol_ions = ((df.values > noise) * df.columns).max(axis=1) - minus
mol_ions[np.abs(mol_ions) < 0] = 0
d = np.abs(np.ones(df.shape) * df.columns -
(mol_ions[np.newaxis].T * np.ones(df.shape))) < 1
d = (df.values * d).sum(axis=1)
return Trace(d, df.index, name='m-' + str(minus)) | python | {
"resource": ""
} |
q40861 | basemz | train | def basemz(df):
"""
The mz of the most abundant ion.
"""
# returns the
d = np.array(df.columns)[df.values.argmax(axis=1)]
return Trace(d, df.index, name='basemz') | python | {
"resource": ""
} |
q40862 | coda | train | def coda(df, window, level):
"""
CODA processing from Windig, Phalp, & Payne 1996 Anal Chem
"""
# pull out the data
d = df.values
# smooth the data and standardize it
smooth_data = movingaverage(d, df.index, window)[0]
stand_data = (smooth_data - smooth_data.mean()) / smooth_data.std()
# scale the data to have unit length
scale_data = d / np.sqrt(np.sum(d ** 2, axis=0))
# calculate the "mass chromatographic quality" (MCQ) index
mcq = np.sum(stand_data * scale_data, axis=0) / np.sqrt(d.shape[0] - 1)
# filter out ions with an mcq below level
good_ions = [i for i, q in zip(df.columns, mcq) if q >= level]
return good_ions | python | {
"resource": ""
} |
q40863 | tfclasses | train | def tfclasses():
"""
A mapping of mimetypes to every class for reading data files.
"""
# automatically find any subclasses of TraceFile in the same
# directory as me
classes = {}
mydir = op.dirname(op.abspath(inspect.getfile(get_mimetype)))
tfcls = {"<class 'aston.tracefile.TraceFile'>",
"<class 'aston.tracefile.ScanListFile'>"}
for filename in glob(op.join(mydir, '*.py')):
name = op.splitext(op.basename(filename))[0]
module = import_module('aston.tracefile.' + name)
for clsname in dir(module):
cls = getattr(module, clsname)
if hasattr(cls, '__base__'):
if str(cls.__base__) in tfcls:
classes[cls.mime] = cls
return classes | python | {
"resource": ""
} |
q40864 | fit | train | def fit(ts, fs=[], all_params=[], fit_vars=None,
alg='leastsq', make_bounded=True):
"""
Use a minimization algorithm to fit a AstonSeries with
analytical functions.
"""
if fit_vars is None:
fit_vars = [f._peakargs for f in fs]
initc = [min(ts.values)]
for f, peak_params, to_fit in zip(fs, all_params, fit_vars):
if 'v' in to_fit:
to_fit.remove('v')
if make_bounded and hasattr(f, '_pbounds'):
new_v = _to_unbnd_p({i: peak_params[i] for i in to_fit},
f._pbounds)
initc += [new_v[i] for i in to_fit]
else:
initc += [peak_params[i] for i in to_fit]
def errfunc_lsq(fit_params, t, y, all_params):
# first value in fit_params is baseline
# fit_y = np.ones(len(t)) * fit_params[0]
fit_y = np.zeros(len(t))
param_i = 1
for f, peak_params, to_fit in zip(fs, all_params, fit_vars):
for k in to_fit:
peak_params[k] = fit_params[param_i]
param_i += 1
if make_bounded and hasattr(f, '_pbounds'):
fit_y += f(t, **_to_bound_p(peak_params, f._pbounds))
else:
fit_y += f(t, **peak_params)
return fit_y - y
def errfunc(p, t, y, all_params):
return np.sum(errfunc_lsq(p, t, y, all_params) ** 2)
if alg == 'simplex':
fit_p, _ = fmin(errfunc, initc, args=(ts.index, ts.values,
peak_params))
# elif alg == 'anneal':
# fit_p, _ = anneal(errfunc, initc, args=(ts.index, ts.values,
# peak_params))
elif alg == 'lbfgsb':
# TODO: use bounds param
fitp, _ = fmin_l_bfgs_b(errfunc, fit_p,
args=(ts.index, ts.values, peak_params),
approx_grad=True)
elif alg == 'leastsq':
fit_p, _ = leastsq(errfunc_lsq, initc, args=(ts.index, ts.values,
all_params))
# else:
# r = minimize(errfunc, initc, \
# args=(ts.index, ts.values, all_params), \
# jac=False, gtol=1e-2)
# #if not r['success']:
# # print('Fail:' + str(f))
# # print(r)
# #if np.nan in r['x']: # not r['success']?
# # fit_p = initc
# #else:
# # fit_p = r['x']
fit_pl = fit_p.tolist()
v = fit_pl.pop(0) # noqa
fitted_params = []
for f, to_fit in zip(fs, fit_vars):
fit_p_dict = {v: fit_pl.pop(0) for v in to_fit}
# fit_p_dict['v'] = v
if make_bounded and hasattr(f, '_pbounds'):
fitted_params.append(_to_bound_p(fit_p_dict, f._pbounds))
else:
fitted_params.append(fit_p_dict)
# calculate r^2 of the fit
ss_err = errfunc(fit_p, ts.index, ts.values, fitted_params)
ss_tot = np.sum((ts.values - np.mean(ts.values)) ** 2)
r2 = 1 - ss_err / ss_tot
res = {'r^2': r2}
return fitted_params, res | python | {
"resource": ""
} |
q40865 | execute_process_async | train | def execute_process_async(func, *args, **kwargs):
"""
Executes `func` in a separate process. Memory and other resources are not
available. This gives true concurrency at the cost of losing access to
these resources. `args` and `kwargs` are
"""
global _GIPC_EXECUTOR
if _GIPC_EXECUTOR is None:
_GIPC_EXECUTOR = GIPCExecutor(
num_procs=settings.node.gipc_pool_size,
num_greenlets=settings.node.greenlet_pool_size)
return _GIPC_EXECUTOR.submit(func, *args, **kwargs) | python | {
"resource": ""
} |
q40866 | HelpScoutWebHook.receive | train | def receive(self, event_type, signature, data_str):
"""Receive a web hook for the event and signature.
Args:
event_type (str): Name of the event that was received (from the
request ``X-HelpScout-Event`` header).
signature (str): The signature that was received, which serves as
authentication (from the request ``X-HelpScout-Signature``
header).
data_str (str): The raw data that was posted by HelpScout
to the web hook. This must be the raw string, because if it
is parsed with JSON it will lose its ordering and not pass
signature validation.
Raises:
helpscout.exceptions.HelpScoutSecurityException: If an invalid
signature is provided, and ``raise_if_invalid`` is ``True``.
Returns:
helpscout.web_hook.WebHookEvent: The authenticated web hook
request.
"""
if not self.validate_signature(signature, data_str):
raise HelpScoutSecurityException(
'The signature provided by this request was invalid.',
)
return HelpScoutWebHookEvent(
event_type=event_type,
record=json.loads(data_str),
) | python | {
"resource": ""
} |
q40867 | HelpScoutWebHook.validate_signature | train | def validate_signature(self, signature, data, encoding='utf8'):
"""Validate the signature for the provided data.
Args:
signature (str or bytes or bytearray): Signature that was provided
for the request.
data (str or bytes or bytearray): Data string to validate against
the signature.
encoding (str, optional): If a string was provided for ``data`` or
``signature``, this is the character encoding.
Returns:
bool: Whether the signature is valid for the provided data.
"""
if isinstance(data, string_types):
data = bytearray(data, encoding)
if isinstance(signature, string_types):
signature = bytearray(signature, encoding)
secret_key = bytearray(self.secret_key, 'utf8')
hashed = hmac.new(secret_key, data, sha1)
encoded = b64encode(hashed.digest())
return encoded.strip() == signature.strip() | python | {
"resource": ""
} |
q40868 | train_doc2vec | train | def train_doc2vec(paths, out='data/model.d2v', tokenizer=word_tokenize, sentences=False, **kwargs):
"""
Train a doc2vec model on a list of files.
"""
kwargs = {
'size': 400,
'window': 8,
'min_count': 2,
'workers': 8
}.update(kwargs)
n = 0
for path in paths:
print('Counting lines for {0}...'.format(path))
n += sum(1 for line in open(path, 'r'))
print('Processing {0} lines...'.format(n))
print('Training doc2vec model...')
m = Doc2Vec(_doc2vec_doc_stream(paths, n, tokenizer=tokenizer, sentences=sentences), **kwargs)
print('Saving...')
m.save(out) | python | {
"resource": ""
} |
q40869 | _doc2vec_doc_stream | train | def _doc2vec_doc_stream(paths, n, tokenizer=word_tokenize, sentences=True):
"""
Generator to feed sentences to the dov2vec model.
"""
i = 0
p = Progress()
for path in paths:
with open(path, 'r') as f:
for line in f:
i += 1
p.print_progress(i/n)
# We do minimal pre-processing here so the model can learn
# punctuation
line = line.lower()
if sentences:
for sent in sent_tokenize(line):
tokens = tokenizer(sent)
yield LabeledSentence(tokens, ['SENT_{}'.format(i)])
else:
tokens = tokenizer(line)
yield LabeledSentence(tokens, ['SENT_{}'.format(i)]) | python | {
"resource": ""
} |
q40870 | LaCrosse.get_info | train | def get_info(self):
"""Get current configuration info from 'v' command."""
re_info = re.compile(r'\[.*\]')
self._write_cmd('v')
while True:
line = self._serial.readline()
try:
line = line.encode().decode('utf-8')
except AttributeError:
line = line.decode('utf-8')
match = re_info.match(line)
if match:
return self._parse_info(line) | python | {
"resource": ""
} |
q40871 | LaCrosse.set_frequency | train | def set_frequency(self, frequency, rfm=1):
"""Set frequency in kHz.
The frequency can be set in 5kHz steps.
"""
cmds = {1: 'f', 2: 'F'}
self._write_cmd('{}{}'.format(frequency, cmds[rfm])) | python | {
"resource": ""
} |
q40872 | LaCrosse.set_toggle_interval | train | def set_toggle_interval(self, interval, rfm=1):
"""Set the toggle interval."""
cmds = {1: 't', 2: 'T'}
self._write_cmd('{}{}'.format(interval, cmds[rfm])) | python | {
"resource": ""
} |
q40873 | LaCrosse.set_toggle_mask | train | def set_toggle_mask(self, mode_mask, rfm=1):
"""Set toggle baudrate mask.
The baudrate mask values are:
1: 17.241 kbps
2 : 9.579 kbps
4 : 8.842 kbps
These values can be or'ed.
"""
cmds = {1: 'm', 2: 'M'}
self._write_cmd('{}{}'.format(mode_mask, cmds[rfm])) | python | {
"resource": ""
} |
q40874 | LaCrosse._refresh | train | def _refresh(self):
"""Background refreshing thread."""
while not self._stopevent.isSet():
line = self._serial.readline()
#this is for python2/python3 compatibility. Is there a better way?
try:
line = line.encode().decode('utf-8')
except AttributeError:
line = line.decode('utf-8')
if LaCrosseSensor.re_reading.match(line):
sensor = LaCrosseSensor(line)
self.sensors[sensor.sensorid] = sensor
if self._callback:
self._callback(sensor, self._callback_data)
if sensor.sensorid in self._registry:
for cbs in self._registry[sensor.sensorid]:
cbs[0](sensor, cbs[1]) | python | {
"resource": ""
} |
q40875 | LaCrosse.register_callback | train | def register_callback(self, sensorid, callback, user_data=None):
"""Register a callback for the specified sensor id."""
if sensorid not in self._registry:
self._registry[sensorid] = list()
self._registry[sensorid].append((callback, user_data)) | python | {
"resource": ""
} |
q40876 | LaCrosse.register_all | train | def register_all(self, callback, user_data=None):
"""Register a callback for all sensors."""
self._callback = callback
self._callback_data = user_data | python | {
"resource": ""
} |
q40877 | Transfer._initialize | train | def _initialize(self):
"""Initialize transfer."""
payload = {
'apikey': self.session.cookies.get('apikey'),
'source': self.session.cookies.get('source')
}
if self.fm_user.logged_in:
payload['logintoken'] = self.session.cookies.get('logintoken')
payload.update(self.transfer_info)
method, url = get_URL('init')
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
for key in ['transferid', 'transferkey', 'transferurl']:
self.transfer_info[key] = res.json().get(key)
else:
hellraiser(res) | python | {
"resource": ""
} |
q40878 | Transfer._parse_recipients | train | def _parse_recipients(self, to):
"""Make sure we have a "," separated list of recipients
:param to: Recipient(s)
:type to: (str,
list,
:class:`pyfilemail.Contact`,
:class:`pyfilemail.Group`
)
:rtype: ``str``
"""
if to is None:
return None
if isinstance(to, list):
recipients = []
for recipient in to:
if isinstance(recipient, dict):
if 'contactgroupname' in recipient:
recipients.append(recipient['contactgroupname'])
else:
recipients.append(recipient.get('email'))
else:
recipients.append(recipient)
elif isinstance(to, basestring):
if ',' in to:
recipients = to.strip().split(',')
else:
recipients = [to]
return ', '.join(recipients) | python | {
"resource": ""
} |
q40879 | Transfer.get_file_specs | train | def get_file_specs(self, filepath, keep_folders=False):
"""Gather information on files needed for valid transfer.
:param filepath: Path to file in question
:param keep_folders: Whether or not to maintain folder structure
:type keep_folders: bool
:type filepath: str, unicode
:rtype: ``dict``
"""
path, filename = os.path.split(filepath)
fileid = str(uuid4()).replace('-', '')
if self.checksum:
with open(filepath, 'rb') as f:
md5hash = md5(f.read()).digest().encode('base64')[:-1]
else:
md5hash = None
specs = {
'transferid': self.transfer_id,
'transferkey': self.transfer_info['transferkey'],
'fileid': fileid,
'filepath': filepath,
'thefilename': keep_folders and filepath or filename,
'totalsize': os.path.getsize(filepath),
'md5': md5hash,
'content-type': guess_type(filepath)[0]
}
return specs | python | {
"resource": ""
} |
q40880 | Transfer.get_files | train | def get_files(self):
"""Get information on file in transfer from Filemail.
:rtype: ``list`` of ``dict`` objects with info on files
"""
method, url = get_URL('get')
payload = {
'apikey': self.session.cookies.get('apikey'),
'logintoken': self.session.cookies.get('logintoken'),
'transferid': self.transfer_id,
}
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
transfer_data = res.json()['transfer']
files = transfer_data['files']
for file_data in files:
self._files.append(file_data)
return self.files
hellraiser(res) | python | {
"resource": ""
} |
q40881 | Transfer.share | train | def share(self, to, sender=None, message=None):
"""Share transfer with new message to new people.
:param to: receiver(s)
:param sender: Alternate email address as sender
:param message: Meggase to new recipients
:type to: ``list`` or ``str`` or ``unicode``
:type sender: ``str`` or ``unicode``
:type message: ``str`` or ``unicode``
:rtyep: ``bool``
"""
method, url = get_URL('share')
payload = {
'apikey': self.session.cookies.get('apikey'),
'logintoken': self.session.cookies.get('logintoken'),
'transferid': self.transfer_id,
'to': self._parse_recipients(to),
'from': sender or self.fm_user.username,
'message': message or ''
}
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
return True
hellraiser(res) | python | {
"resource": ""
} |
q40882 | Transfer.cancel | train | def cancel(self):
"""Cancel the current transfer.
:rtype: ``bool``
"""
method, url = get_URL('cancel')
payload = {
'apikey': self.config.get('apikey'),
'transferid': self.transfer_id,
'transferkey': self.transfer_info.get('transferkey')
}
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
self._complete = True
return True
hellraiser(res) | python | {
"resource": ""
} |
q40883 | Transfer.rename_file | train | def rename_file(self, fmfile, newname):
"""Rename file in transfer.
:param fmfile: file data from filemail containing fileid
:param newname: new file name
:type fmfile: ``dict``
:type newname: ``str`` or ``unicode``
:rtype: ``bool``
"""
if not isinstance(fmfile, dict):
raise FMBaseError('fmfile must be a <dict>')
method, url = get_URL('file_rename')
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken'),
'fileid': fmfile.get('fileid'),
'filename': newname
}
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
self._complete = True
return True
hellraiser(res) | python | {
"resource": ""
} |
q40884 | Transfer.delete_file | train | def delete_file(self, fmfile):
"""Delete file from transfer.
:param fmfile: file data from filemail containing fileid
:type fmfile: ``dict``
:rtype: ``bool``
"""
if not isinstance(fmfile, dict):
raise FMFileError('fmfile must be a <dict>')
method, url = get_URL('file_delete')
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken'),
'fileid': fmfile.get('fileid')
}
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
self._complete = True
return True
hellraiser(res) | python | {
"resource": ""
} |
q40885 | Transfer.update | train | def update(self,
message=None,
subject=None,
days=None,
downloads=None,
notify=None):
"""Update properties for a transfer.
:param message: updated message to recipient(s)
:param subject: updated subject for trasfer
:param days: updated amount of days transfer is available
:param downloads: update amount of downloads allowed for transfer
:param notify: update whether to notifiy on downloads or not
:type message: ``str`` or ``unicode``
:type subject: ``str`` or ``unicode``
:type days: ``int``
:type downloads: ``int``
:type notify: ``bool``
:rtype: ``bool``
"""
method, url = get_URL('update')
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken'),
'transferid': self.transfer_id,
}
data = {
'message': message or self.transfer_info.get('message'),
'message': subject or self.transfer_info.get('subject'),
'days': days or self.transfer_info.get('days'),
'downloads': downloads or self.transfer_info.get('downloads'),
'notify': notify or self.transfer_info.get('notify')
}
payload.update(data)
res = getattr(self.session, method)(url, params=payload)
if res.status_code:
self.transfer_info.update(data)
return True
hellraiser(res) | python | {
"resource": ""
} |
q40886 | Transfer.download | train | def download(self,
files=None,
destination=None,
overwrite=False,
callback=None):
"""Download file or files.
:param files: file or files to download
:param destination: destination path (defaults to users home directory)
:param overwrite: replace existing files?
:param callback: callback function that will receive total file size
and written bytes as arguments
:type files: ``list`` of ``dict`` with file data from filemail
:type destination: ``str`` or ``unicode``
:type overwrite: ``bool``
:type callback: ``func``
"""
if files is None:
files = self.files
elif not isinstance(files, list):
files = [files]
if destination is None:
destination = os.path.expanduser('~')
for f in files:
if not isinstance(f, dict):
raise FMBaseError('File must be a <dict> with file data')
self._download(f, destination, overwrite, callback) | python | {
"resource": ""
} |
q40887 | Transfer._download | train | def _download(self, fmfile, destination, overwrite, callback):
"""The actual downloader streaming content from Filemail.
:param fmfile: to download
:param destination: destination path
:param overwrite: replace existing files?
:param callback: callback function that will receive total file size
and written bytes as arguments
:type fmfile: ``dict``
:type destination: ``str`` or ``unicode``
:type overwrite: ``bool``
:type callback: ``func``
"""
fullpath = os.path.join(destination, fmfile.get('filename'))
path, filename = os.path.split(fullpath)
if os.path.exists(fullpath):
msg = 'Skipping existing file: {filename}'
logger.info(msg.format(filename=filename))
return
filesize = fmfile.get('filesize')
if not os.path.exists(path):
os.makedirs(path)
url = fmfile.get('downloadurl')
stream = self.session.get(url, stream=True)
def pg_callback(bytes_written):
if pm.COMMANDLINE:
bar.show(bytes_written)
elif callback is not None:
callback(filesize, bytes_written)
if pm.COMMANDLINE:
label = fmfile['filename'] + ': '
bar = ProgressBar(label=label, expected_size=filesize)
bytes_written = 0
with open(fullpath, 'wb') as f:
for chunk in stream.iter_content(chunk_size=1024 * 1024):
if not chunk:
break
f.write(chunk)
bytes_written += len(chunk)
# Callback
pg_callback(bytes_written) | python | {
"resource": ""
} |
q40888 | Transfer.compress | train | def compress(self):
"""Compress files on the server side after transfer complete
and make zip available for download.
:rtype: ``bool``
"""
method, url = get_URL('compress')
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken'),
'transferid': self.transfer_id
}
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
return True
hellraiser(res) | python | {
"resource": ""
} |
q40889 | ToSentence.from_pin | train | def from_pin(self, pin, timeout=5):
"""
Generate a sentence from PIN
:param str pin: a string of digits
:param float timeout: total time in seconds
:return dict: {
'sentence': sentence corresponding to the PIN,
'overlap': overlapping positions, starting for 0
}
>>> ToSentence().from_pin('3492')
[("Helva's", False), ('masking', True), ('was', False), ('not', False), ('without', False), ('real', True), (',', False), ('pretty', True), ('novels', True)]
"""
return self.keyword_parse.from_initials_list([self.mnemonic.reality_to_starter('major_system', number)
for number in pin],
timeout) | python | {
"resource": ""
} |
q40890 | ToSentence.from_keywords | train | def from_keywords(self, keyword_list, strictness=2, timeout=3):
"""
Generate a sentence from initial_list.
:param list keyword_list: a list of keywords to be included in the sentence.
:param int | None strictness: None for highest strictness. 2 or 1 for a less strict POS matching
:param float timeout: timeout of this function
:return list of tuple:
>>> ToSentence().from_keywords(['gains', 'grew', 'pass', 'greene', 'escort', 'illinois'])
[('The', False), ('gains', True), ('of', False), ('Bienville', False), ('upon', False), ('grew', True), ('liberal', False), ('pass', True), ('to', False), ('the', False), ('Indians', False), (',', False), ('in', False), ('greene', True), ('to', False), ('drive', False), ('back', False), ('the', False), ('Carolina', False), ('escort', True), (',', False), ('was', False), ('probably', False), ('a', False), ('illinois', True)]
"""
return self.keyword_parse.from_keyword_list(keyword_list, strictness, timeout) | python | {
"resource": ""
} |
q40891 | _define_helper | train | def _define_helper(flag_name, default_value, docstring, flagtype, required):
"""Registers 'flag_name' with 'default_value' and 'docstring'."""
option_name = flag_name if required else "--%s" % flag_name
get_context_parser().add_argument(
option_name, default=default_value, help=docstring, type=flagtype) | python | {
"resource": ""
} |
q40892 | NamedParser._get_subparsers | train | def _get_subparsers(self, dest):
"""Get named subparsers."""
if not self._subparsers:
self._subparsers = self.parser.add_subparsers(dest=dest)
elif self._subparsers.dest != dest:
raise KeyError(
"Subparser names mismatch. You can only create one subcommand.")
return self._subparsers | python | {
"resource": ""
} |
q40893 | NamedParser.get_subparser | train | def get_subparser(self, name, dest="subcommand", **kwargs):
"""Get or create subparser."""
if name not in self.children:
# Create the subparser.
subparsers = self._get_subparsers(dest)
parser = subparsers.add_parser(name, **kwargs)
self.children[name] = NamedParser(name, parser)
return self.children[name] | python | {
"resource": ""
} |
q40894 | build_attr_string | train | def build_attr_string(attrs, supported=True):
'''Build a string that will turn any ANSI shell output the desired
colour.
attrs should be a list of keys into the term_attributes table.
'''
if not supported:
return ''
if type(attrs) == str:
attrs = [attrs]
result = '\033['
for attr in attrs:
result += term_attributes[attr] + ';'
return result[:-1] + 'm' | python | {
"resource": ""
} |
q40895 | get_terminal_size | train | def get_terminal_size():
'''Finds the width of the terminal, or returns a suitable default value.'''
def read_terminal_size_by_ioctl(fd):
try:
import struct, fcntl, termios
cr = struct.unpack('hh', fcntl.ioctl(1, termios.TIOCGWINSZ,
'0000'))
except ImportError:
return None
except IOError as e:
return None
return cr[1], cr[0]
cr = read_terminal_size_by_ioctl(0) or \
read_terminal_size_by_ioctl(1) or \
read_terminal_size_by_ioctl(2)
if not cr:
try:
import os
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = read_terminal_size_by_ioctl(fd)
os.close(fd)
except:
pass
if not cr:
import os
cr = [80, 25] # 25 rows, 80 columns is the default value
if os.getenv('ROWS'):
cr[1] = int(os.getenv('ROWS'))
if os.getenv('COLUMNS'):
cr[0] = int(os.getenv('COLUMNS'))
return cr[1], cr[0] | python | {
"resource": ""
} |
q40896 | dict_to_nvlist | train | def dict_to_nvlist(dict):
'''Convert a dictionary into a CORBA namevalue list.'''
result = []
for item in list(dict.keys()):
result.append(SDOPackage.NameValue(item, omniORB.any.to_any(dict[item])))
return result | python | {
"resource": ""
} |
q40897 | nvlist_to_dict | train | def nvlist_to_dict(nvlist):
'''Convert a CORBA namevalue list into a dictionary.'''
result = {}
for item in nvlist :
result[item.name] = item.value.value()
return result | python | {
"resource": ""
} |
q40898 | filtered | train | def filtered(path, filter):
'''Check if a path is removed by a filter.
Check if a path is in the provided set of paths, @ref filter. If
none of the paths in filter begin with @ref path, then True is
returned to indicate that the path is filtered out. If @ref path is
longer than the filter, and starts with the filter, it is
considered unfiltered (all paths below a filter are unfiltered).
An empty filter ([]) is treated as not filtering any.
'''
if not filter:
return False
for p in filter:
if len(path) > len(p):
if path[:len(p)] == p:
return False
else:
if p[:len(path)] == path:
return False
return True | python | {
"resource": ""
} |
q40899 | Bugsy.get | train | def get(self, bug_number):
"""
Get a bug from Bugzilla. If there is a login token created during
object initialisation it will be part of the query string passed to
Bugzilla
:param bug_number: Bug Number that will be searched. If found will
return a Bug object.
>>> bugzilla = Bugsy()
>>> bug = bugzilla.get(123456)
"""
bug = self.request(
'bug/%s' % bug_number,
params={"include_fields": self. DEFAULT_SEARCH}
)
return Bug(self, **bug['bugs'][0]) | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.